Subversion Repositories: Kolibri OS

Compare Revisions: Rev 5078 → Rev 5077

/drivers/video/drm/i915/main.c
186,7 → 186,7
if( GetService("DISPLAY") != 0 )
return 0;
 
printf("\ni915 v3.17-rc3 build %s %s\nusage: i915 [options]\n"
printf("\ni915 v3.17-rc2 build %s %s\nusage: i915 [options]\n"
"-pm=<0,1> Enable powersavings, fbc, downclocking, etc. (default: 1 - true)\n",
__DATE__, __TIME__);
printf("-rc6=<-1,0-7> Enable power-saving render C-state 6.\n"
/drivers/video/drm/vmwgfx/Makefile
1,5 → 1,4
 
 
CC = gcc
LD = ld
AS = as
14,7 → 13,7
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/uapi
 
CFLAGS = -c -Os $(INCLUDES) -fomit-frame-pointer -fno-builtin-printf
CFLAGS = -c -O2 $(INCLUDES) -march=i686 -fomit-frame-pointer -fno-builtin-printf
CFLAGS+= -mno-ms-bitfields
 
LIBPATH:= $(DRV_TOPDIR)/ddk
94,6 → 93,7
 
$(NAME).dll: $(NAME_OBJS) $(LIBPATH)/libcore.a $(LIBPATH)/libddk.a vmw.lds Makefile
$(LD) -L$(LIBPATH) $(LDFLAGS) -T vmw.lds -o $@ $(NAME_OBJS) $(LIBS)
kpack $@
 
 
%.o : %.c $(HFILES) Makefile
/drivers/video/drm/vmwgfx/main.c
31,14 → 31,22
 
int vmw_init(void);
int kms_init(struct drm_device *dev);
void vmw_driver_thread();
void kms_update();
 
void cpu_detect();
 
void parse_cmdline(char *cmdline, char *log);
int _stdcall display_handler(ioctl_t *io);
 
int srv_blit_bitmap(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
int blit_textured(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
int blit_tex(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
 
void get_pci_info(struct pci_device *dev);
int gem_getparam(struct drm_device *dev, void *data);
 
56,6 → 64,23
 
int kms_modeset = 1;
 
 
void vmw_driver_thread()
{
dbgprintf("%s\n",__FUNCTION__);
 
// run_workqueue(dev_priv->wq);
 
while(driver_wq_state)
{
kms_update();
delay(1);
};
__asm__ __volatile__ (
"int $0x40"
::"a"(-1));
}
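The inline assembly that ends vmw_driver_thread() is the KolibriOS thread-exit idiom: system calls are issued via int 0x40 with the function number in EAX, and function -1 terminates the calling thread. A minimal sketch of the same idiom as a standalone helper (the name kos_exit_thread is illustrative, not part of this driver):

/* Sketch: KolibriOS system call -1 terminates the calling thread.
 * The "a" constraint loads -1 into EAX before int 0x40 is raised. */
static inline void kos_exit_thread(void)
{
    __asm__ __volatile__ ("int $0x40" :: "a"(-1));
    __builtin_unreachable();   /* syscall -1 does not return */
}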
 
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
 
178,9 → 203,9
break;
 
case SRV_ENUM_MODES:
// dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
// inp, io->inp_size, io->out_size );
// check_output(4);
dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
inp, io->inp_size, io->out_size );
check_output(4);
// check_input(*outp * sizeof(videomode_t));
if( kms_modeset)
retval = get_videomodes((videomode_t*)inp, outp);
187,9 → 212,9
break;
 
case SRV_SET_MODE:
// dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
// inp, io->inp_size);
// check_input(sizeof(videomode_t));
dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
inp, io->inp_size);
check_input(sizeof(videomode_t));
if( kms_modeset )
retval = set_user_mode((videomode_t*)inp);
break;
805,7 → 830,6
uint32_t hot_y;
 
struct list_head list;
void *priv;
}cursor_t;
 
#define CURSOR_WIDTH 64
841,8 → 865,9
u32 check_m_pixel;
};
 
display_t *os_display;
 
static display_t *os_display;
 
static int count_connector_modes(struct drm_connector* connector)
{
struct drm_display_mode *mode;
864,8 → 889,6
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
 
du->cursor_x = x;
du->cursor_y = y;
vmw_cursor_update_position(dev_priv, true, x,y);
};
 
872,7 → 895,6
static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
{
struct vmw_private *dev_priv = vmw_priv(os_display->ddev);
struct vmw_display_unit *du = vmw_crtc_to_du(os_display->crtc);
cursor_t *old;
 
old = os_display->cursor;
880,38 → 902,30
 
vmw_cursor_update_image(dev_priv, cursor->data,
64, 64, cursor->hot_x, cursor->hot_y);
vmw_cursor_update_position(dev_priv, true,
du->cursor_x, du->cursor_y);
 
// vmw_cursor_update_position(dev_priv, true,
// du->cursor_x + du->hotspot_x,
// du->cursor_y + du->hotspot_y);
 
return old;
};
 
void vmw_driver_thread()
{
DRM_DEBUG_KMS("%s\n",__FUNCTION__);
 
select_cursor_kms(os_display->cursor);
 
while(driver_wq_state)
{
kms_update();
delay(2);
};
__asm__ __volatile__ (
"int $0x40"
::"a"(-1));
}
 
int kms_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_crtc *crtc = NULL;
struct vmw_display_unit *du;
struct drm_framebuffer *fb;
 
cursor_t *cursor;
int mode_count;
u32_t ifl;
int err;
 
ENTER();
 
crtc = list_entry(dev->mode_config.crtc_list.next, typeof(*crtc), head);
encoder = list_entry(dev->mode_config.encoder_list.next, typeof(*encoder), head);
connector = list_entry(dev->mode_config.connector_list.next, typeof(*connector), head);
930,20 → 944,24
mode_count++;
};
 
printf("%s %d\n",__FUNCTION__, mode_count);
 
DRM_DEBUG_KMS("CONNECTOR %x ID:%d status:%d ENCODER %x CRTC %x ID:%d\n",
connector, connector->base.id,
connector->status, connector->encoder,
crtc, crtc->base.id );
 
DRM_DEBUG_KMS("[Select CRTC:%d]\n", crtc->base.id);
 
os_display = GetDisplay();
 
ifl = safe_cli();
{
os_display->ddev = dev;
os_display->connector = connector;
os_display->crtc = crtc;
os_display->supported_modes = mode_count;
 
ifl = safe_cli();
{
os_display->restore_cursor(0,0);
os_display->select_cursor = select_cursor_kms;
os_display->show_cursor = NULL;
950,14 → 968,16
os_display->move_cursor = move_cursor_kms;
os_display->restore_cursor = restore_cursor;
os_display->disable_mouse = disable_mouse;
select_cursor_kms(os_display->cursor);
};
safe_sti(ifl);
 
du = vmw_crtc_to_du(os_display->crtc);
du->cursor_x = os_display->width/2;
du->cursor_y = os_display->height/2;
select_cursor_kms(os_display->cursor);
#ifdef __HWA__
err = init_bitmaps();
#endif
 
LEAVE();
 
return 0;
};
 
966,7 → 986,6
{
struct vmw_private *dev_priv = vmw_priv(main_device);
size_t fifo_size;
u32_t ifl;
int i;
 
struct {
985,8 → 1004,8
cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd->body.x = 0;
cmd->body.y = 0;
cmd->body.width = os_display->width;
cmd->body.height = os_display->height;
cmd->body.width = os_display->width; //cpu_to_le32(clips->x2 - clips->x1);
cmd->body.height = os_display->height; //cpu_to_le32(clips->y2 - clips->y1);
 
vmw_fifo_commit(dev_priv, fifo_size);
}
993,9 → 1012,10
 
int get_videomodes(videomode_t *mode, int *count)
{
struct drm_display_mode *drmmode;
int err = -1;
 
dbgprintf("mode %x count %d\n", mode, *count);
 
if( *count == 0 )
{
*count = os_display->supported_modes;
1003,6 → 1023,7
}
else if( mode != NULL )
{
struct drm_display_mode *drmmode;
int i = 0;
 
if( *count > os_display->supported_modes)
1015,17 → 1036,15
mode->width = drm_mode_width(drmmode);
mode->height = drm_mode_height(drmmode);
mode->bpp = 32;
mode->freq = drmmode->vrefresh;
mode->freq = drm_mode_vrefresh(drmmode);
i++;
mode++;
}
else break;
};
 
*count = i;
err = 0;
};
 
return err;
};
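The freq assignment differs between the two revisions: one side reads the cached drmmode->vrefresh field, the other calls the drm_mode_vrefresh() helper, which falls back to deriving the rate from the mode timings when the cached field is zero. A simplified sketch of that fallback (the real helper also rounds and compensates for interlaced, doublescan and vscan modes):

/* Simplified sketch of drm_mode_vrefresh()'s fallback computation. */
static int mode_vrefresh_sketch(const struct drm_display_mode *m)
{
    if (m->vrefresh > 0)
        return m->vrefresh;              /* trust the cached value */
    if (m->htotal == 0 || m->vtotal == 0)
        return 0;
    /* m->clock is the pixel clock in kHz, hence the factor of 1000 */
    return m->clock * 1000 / (m->htotal * m->vtotal);
}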
 
/drivers/video/drm/vmwgfx/vmwgfx_drv.c
142,11 → 142,11
 
static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
vmw_kms_cursor_bypass_ioctl,
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
159,28 → 159,29
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
 
VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
vmw_fence_obj_signaled_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_FENCE_EVENT,
vmw_fence_event_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
 
/* these allow direct access to the framebuffers mark as master only */
VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
193,19 → 194,19
DRM_MASTER | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_CREATE_SHADER,
vmw_shader_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_UNREF_SHADER,
vmw_shader_destroy_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
vmw_gb_surface_define_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
vmw_gb_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_dmabuf_synccpu_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
DRM_AUTH | DRM_UNLOCKED),
};
#endif
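The flag dropped throughout this table, DRM_RENDER_ALLOW, marks an ioctl as callable through a render node without DRM-master authentication; DRM_AUTH alone requires the caller to be authenticated against the primary node. A sketch of the shape of the permission test these flags feed into inside drm_ioctl (the exact code varies by kernel version):

/* Sketch of drm_ioctl's permission test, assuming 3.x-era semantics. */
if (drm_is_render_client(file_priv) && !(flags & DRM_RENDER_ALLOW))
    return -EACCES;   /* ioctl not whitelisted for render nodes */
if ((flags & DRM_AUTH) && !file_priv->authenticated)
    return -EACCES;   /* caller never authenticated against the master */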
 
314,7 → 315,7
if (unlikely(ret != 0))
return ret;
 
ret = ttm_bo_reserve(bo, false, true, false, NULL);
ret = ttm_bo_reserve(bo, false, true, false, 0);
BUG_ON(ret != 0);
 
ret = ttm_bo_kmap(bo, 0, 1, &map);
340,6 → 341,7
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
ENTER();
 
ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
if (unlikely(ret != 0)) {
352,8 → 354,8
// goto out_no_query_bo;
// vmw_dummy_query_bo_prepare(dev_priv);
 
LEAVE();
 
 
return 0;
 
out_no_query_bo:
532,6 → 534,8
enum vmw_res_type i;
bool refuse_dma = false;
 
ENTER();
 
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
DRM_ERROR("Failed allocating a device private struct.\n");
548,7 → 552,6
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
 
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
567,6 → 570,9
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
printk("io: %x vram: %x mmio: %x\n",dev_priv->io_start,
dev_priv->vram_start,dev_priv->mmio_start);
 
dev_priv->enable_fb = enable_fbdev;
 
mutex_lock(&dev_priv->hw_mutex);
610,7 → 616,6
dev_priv->memory_size = 512*1024*1024;
}
dev_priv->max_mob_pages = 0;
dev_priv->max_mob_size = 0;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
uint64_t mem_size =
vmw_read(dev_priv,
620,8 → 625,6
dev_priv->prim_bb_mem =
vmw_read(dev_priv,
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
dev_priv->max_mob_size =
vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
} else
dev_priv->prim_bb_mem = dev_priv->vram_size;
 
664,9 → 667,7
 
ret = ttm_bo_device_init(&dev_priv->bdev,
dev_priv->bo_global_ref.ref.object,
&vmw_bo_driver,
NULL,
VMWGFX_FILE_PAGE_OFFSET,
&vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
716,14 → 717,14
goto out_err4;
}
 
dev_priv->tdev = ttm_object_device_init
(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
// dev_priv->tdev = ttm_object_device_init
// (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
 
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
ret = -ENOMEM;
goto out_err4;
}
// if (unlikely(dev_priv->tdev == NULL)) {
// DRM_ERROR("Unable to initialize TTM object management.\n");
// ret = -ENOMEM;
// goto out_err4;
// }
 
dev->dev_private = dev_priv;
 
730,7 → 731,7
#if 0
 
if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
ret = drm_irq_install(dev, dev->pdev->irq);
ret = drm_irq_install(dev);
if (ret != 0) {
DRM_ERROR("Failed installing irq: %d\n", ret);
goto out_no_irq;
760,6 → 761,7
 
main_device = dev;
 
LEAVE();
return 0;
 
out_no_fifo:
888,6 → 890,7
// goto out_no_tfile;
 
file_priv->driver_priv = vmw_fp;
// dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
return 0;
 
1091,11 → 1094,12
{
struct vmw_private *dev_priv =
container_of(nb, struct vmw_private, pm_nb);
struct vmw_master *vmaster = dev_priv->active_master;
 
switch (val) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
ttm_suspend_lock(&dev_priv->reservation_sem);
ttm_suspend_lock(&vmaster->lock);
 
/**
* This empties VRAM and unbinds all GMR bindings.
1109,7 → 1113,7
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
case PM_POST_RESTORE:
ttm_suspend_unlock(&dev_priv->reservation_sem);
ttm_suspend_unlock(&vmaster->lock);
 
break;
case PM_RESTORE_PREPARE:
1197,7 → 1201,7
 
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET | DRIVER_RENDER,
DRIVER_MODESET,
.load = vmw_driver_load,
// .unload = vmw_driver_unload,
// .lastclose = vmw_lastclose,
1244,6 → 1248,7
const struct pci_device_id *ent;
int err;
 
ENTER();
 
ent = find_pci_device(&device, vmw_pci_id_list);
if( unlikely(ent == NULL) )
1258,6 → 1263,7
device.pci_dev.device);
 
err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
LEAVE();
 
return err;
}
/drivers/video/drm/vmwgfx/vmwgfx_fifo.c
106,6 → 106,8
uint32_t min;
uint32_t dummy;
 
ENTER();
 
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = KernelAlloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
165,6 → 167,7
vmw_marker_queue_init(&fifo->marker_queue);
 
int ret = 0; //vmw_fifo_send_fence(dev_priv, &dummy);
LEAVE();
return ret;
}
 
230,7 → 233,7
unsigned long timeout)
{
int ret = 0;
unsigned long end_jiffies = jiffies + timeout;
unsigned long end_jiffies = GetTimerTicks() + timeout;
// DEFINE_WAIT(__wait);
 
DRM_INFO("Fifo wait noirq.\n");
241,7 → 244,7
// TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (!vmw_fifo_is_full(dev_priv, bytes))
break;
if (time_after_eq(jiffies, end_jiffies)) {
if (time_after_eq(GetTimerTicks(), end_jiffies)) {
ret = -EBUSY;
DRM_ERROR("SVGA device lockup.\n");
break;
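The substitution in this hunk is a porting pattern rather than a behavioral change: time_after_eq() only needs a monotonically increasing counter, so the Linux jiffies counter can be replaced by KolibriOS's GetTimerTicks() without touching the surrounding logic. The idiom in isolation, assuming GetTimerTicks() returns the system tick counter (KolibriOS ticks are 10 ms):

/* Timeout-poll sketch, assuming GetTimerTicks() counts 10 ms ticks;
 * time_after_eq() keeps the comparison safe across counter wraparound. */
unsigned long end_ticks = GetTimerTicks() + timeout;
while (vmw_fifo_is_full(dev_priv, bytes)) {
    if (time_after_eq(GetTimerTicks(), end_ticks))
        return -EBUSY;        /* device did not drain the FIFO in time */
    delay(1);                 /* sleep one tick before re-polling */
}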
408,6 → 411,8
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
 
ENTER();
 
if (bytes < chunk_size)
chunk_size = bytes;
 
418,6 → 423,8
if (rest)
memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
rest);
LEAVE();
 
}
 
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
427,6 → 434,7
{
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
ENTER();
 
while (bytes > 0) {
iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
438,6 → 446,7
mb();
bytes -= sizeof(uint32_t);
}
LEAVE();
}
 
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
449,6 → 458,8
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 
// ENTER();
 
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
 
484,6 → 495,8
// up_write(&fifo_state->rwsem);
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
mutex_unlock(&fifo_state->fifo_mutex);
 
// LEAVE();
}
 
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
/drivers/video/drm/vmwgfx/vmwgfx_kms.c
118,7 → 118,7
*dst++ = 0;
}
for(i = 0; i < 64*(64-32); i++)
*dst++ = 0;
*image++ = 0;
 
cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
cmd->cursor.id = cpu_to_le32(0);
148,7 → 148,7
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
200,7 → 200,7
* can do this since the caller in the drm core doesn't check anything
* which is protected by any looks.
*/
drm_modeset_unlock(&crtc->mutex);
mutex_unlock(&crtc->mutex);
drm_modeset_lock_all(dev_priv->dev);
 
/* A lot of the code assumes this */
265,7 → 265,7
ret = 0;
out:
drm_modeset_unlock_all(dev_priv->dev);
drm_modeset_lock(&crtc->mutex, NULL);
mutex_lock(&crtc->mutex);
 
return ret;
}
286,7 → 286,7
* can do this since the caller in the drm core doesn't check anything
* which is protected by any looks.
*/
drm_modeset_unlock(&crtc->mutex);
mutex_unlock(&crtc->mutex);
drm_modeset_lock_all(dev_priv->dev);
 
vmw_cursor_update_position(dev_priv, shown,
294,7 → 294,7
du->cursor_y + du->hotspot_y);
 
drm_modeset_unlock_all(dev_priv->dev);
drm_modeset_lock(&crtc->mutex, NULL);
mutex_lock(&crtc->mutex);
 
return 0;
}
356,7 → 356,7
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
kmap_num = (64*64*4) >> PAGE_SHIFT;
 
ret = ttm_bo_reserve(bo, true, false, false, NULL);
ret = ttm_bo_reserve(bo, true, false, false, 0);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return;
478,7 → 478,7
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
if (crtc->primary->fb != &framebuffer->base)
if (crtc->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
606,6 → 606,7
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
struct drm_clip_rect norect;
620,7 → 621,7
 
drm_modeset_lock_all(dev_priv->dev);
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0)) {
drm_modeset_unlock_all(dev_priv->dev);
return ret;
641,7 → 642,7
flags, color,
clips, num_clips, inc, NULL);
 
ttm_read_unlock(&dev_priv->reservation_sem);
ttm_read_unlock(&vmaster->lock);
 
drm_modeset_unlock_all(dev_priv->dev);
 
892,7 → 893,7
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->primary->fb != &framebuffer->base)
if (crtc->fb != &framebuffer->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
963,6 → 964,7
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect;
970,7 → 972,7
 
drm_modeset_lock_all(dev_priv->dev);
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0)) {
drm_modeset_unlock_all(dev_priv->dev);
return ret;
997,7 → 999,7
clips, num_clips, increment, NULL);
}
 
ttm_read_unlock(&dev_priv->reservation_sem);
ttm_read_unlock(&vmaster->lock);
 
drm_modeset_unlock_all(dev_priv->dev);
 
1255,7 → 1257,7
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->primary->fb != &vfb->base)
if (crtc->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
1392,7 → 1394,7
 
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
if (crtc->primary->fb != &vfb->base)
if (crtc->fb != &vfb->base)
continue;
units[num_units++] = vmw_crtc_to_du(crtc);
}
1478,6 → 1480,8
struct drm_device *dev = dev_priv->dev;
int ret;
 
ENTER();
 
drm_mode_config_init(dev);
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
1487,7 → 1491,11
dev->mode_config.max_height = 8192;
 
ret = vmw_kms_init_screen_object_display(dev_priv);
// if (ret) /* Fallback */
// (void)vmw_kms_init_legacy_display_system(dev_priv);
 
LEAVE();
 
return 0;
}
 
1512,6 → 1520,7
{
struct drm_vmw_cursor_bypass_arg *arg = data;
struct vmw_display_unit *du;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
 
1529,12 → 1538,13
return 0;
}
 
crtc = drm_crtc_find(dev, arg->crtc_id);
if (!crtc) {
obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -ENOENT;
goto out;
}
 
crtc = obj_to_crtc(obj);
du = vmw_crtc_to_du(crtc);
 
du->hotspot_x = arg->xhot;
1734,7 → 1744,7
uint32_t page_flip_flags)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_framebuffer *old_fb = crtc->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
struct drm_file *file_priv ;
struct vmw_fence_obj *fence = NULL;
1752,7 → 1762,7
if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
return -EINVAL;
 
crtc->primary->fb = fb;
crtc->fb = fb;
 
/* do a full screen dirty update */
clips.x1 = clips.y1 = 0;
1792,7 → 1802,7
return ret;
 
out_no_fence:
crtc->primary->fb = old_fb;
crtc->fb = old_fb;
return ret;
}
#endif
2016,7 → 2026,7
if (du->pref_mode)
list_move(&du->pref_mode->head, &connector->probed_modes);
 
drm_mode_connector_list_update(connector, true);
drm_mode_connector_list_update(connector);
 
return 1;
}
2035,6 → 2045,7
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
struct vmw_master *vmaster = vmw_master(file_priv->master);
void __user *user_rects;
struct drm_vmw_rect *rects;
unsigned rects_size;
2042,7 → 2053,7
int i;
struct drm_mode_config *mode_config = &dev->mode_config;
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
2084,7 → 2095,7
out_free:
kfree(rects);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
ttm_read_unlock(&vmaster->lock);
return ret;
}
#endif
/drivers/video/drm/vmwgfx/svga3d_reg.h
261,7 → 261,12
/* Planar video formats. */
SVGA3D_YV12 = 121,
 
SVGA3D_FORMAT_MAX = 122,
/* Shader constant formats. */
SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
SVGA3D_SURFACE_SHADERCONST_INT = 123,
SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
 
SVGA3D_FORMAT_MAX = 125,
} SVGA3dSurfaceFormat;
 
typedef uint32 SVGA3dColor; /* a, r, g, b */
1218,20 → 1223,10
#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
 
#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
#define SVGA_3D_CMD_GB_MOB_FENCE 1133
#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
 
#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
#define SVGA_3D_CMD_NOP_ERROR 1137
 
#define SVGA_3D_CMD_RESERVED1 1138
#define SVGA_3D_CMD_RESERVED2 1139
#define SVGA_3D_CMD_RESERVED3 1140
#define SVGA_3D_CMD_RESERVED4 1141
#define SVGA_3D_CMD_RESERVED5 1142
 
#define SVGA_3D_CMD_MAX 1142
#define SVGA_3D_CMD_FUTURE_MAX 3000
 
1978,7 → 1973,8
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
} __packed
}
__attribute__((__packed__))
SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
 
typedef
1988,13 → 1984,15
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
} __packed
}
__attribute__((__packed__))
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
 
typedef
struct {
SVGAOTableType type;
} __packed
}
__attribute__((__packed__))
SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
 
/*
2007,7 → 2005,8
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
} __packed
}
__attribute__((__packed__))
SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
 
 
2018,7 → 2017,8
typedef
struct SVGA3dCmdDestroyGBMob {
SVGAMobId mobid;
} __packed
}
__attribute__((__packed__))
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
 
/*
2031,7 → 2031,8
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
} __packed
}
__attribute__((__packed__))
SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
 
/*
2044,7 → 2045,8
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
} __packed
}
__attribute__((__packed__))
SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
 
/*
2057,7 → 2059,8
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
} __packed
}
__attribute__((__packed__))
SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
 
/*
2067,7 → 2070,8
typedef
struct SVGA3dCmdUpdateGBMobMapping {
SVGAMobId mobid;
} __packed
}
__attribute__((__packed__))
SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
 
/*
2083,8 → 2087,7
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
SVGA3dSize size;
} __packed
SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
 
/*
* Destroy a guest-backed surface.
2093,8 → 2096,7
typedef
struct SVGA3dCmdDestroyGBSurface {
uint32 sid;
} __packed
SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
 
/*
* Bind a guest-backed surface to an object.
2104,8 → 2106,7
struct SVGA3dCmdBindGBSurface {
uint32 sid;
SVGAMobId mobid;
} __packed
SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
 
/*
* Conditionally bind a mob to a guest backed surface if testMobid
2122,7 → 2123,7
SVGAMobId testMobid;
SVGAMobId mobid;
uint32 flags;
} __packed
}
SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
 
/*
2134,8 → 2135,7
struct SVGA3dCmdUpdateGBImage {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
} __packed
SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
 
/*
* Update an entire guest-backed surface.
2145,8 → 2145,7
typedef
struct SVGA3dCmdUpdateGBSurface {
uint32 sid;
} __packed
SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
 
/*
* Readback an image in a guest-backed surface.
2156,8 → 2155,7
typedef
struct SVGA3dCmdReadbackGBImage {
SVGA3dSurfaceImageId image;
} __packed
SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
 
/*
* Readback an entire guest-backed surface.
2167,8 → 2165,7
typedef
struct SVGA3dCmdReadbackGBSurface {
uint32 sid;
} __packed
SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
 
/*
* Readback a sub rect of an image in a guest-backed surface. After
2182,7 → 2179,7
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
} __packed
}
SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
 
/*
2193,8 → 2190,7
typedef
struct SVGA3dCmdInvalidateGBImage {
SVGA3dSurfaceImageId image;
} __packed
SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
 
/*
* Invalidate an entire guest-backed surface.
2204,8 → 2200,7
typedef
struct SVGA3dCmdInvalidateGBSurface {
uint32 sid;
} __packed
SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
 
/*
* Invalidate a sub rect of an image in a guest-backed surface. After
2219,7 → 2214,7
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
} __packed
}
SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
 
/*
2229,8 → 2224,7
typedef
struct SVGA3dCmdDefineGBContext {
uint32 cid;
} __packed
SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
 
/*
* Destroy a guest-backed context.
2239,8 → 2233,7
typedef
struct SVGA3dCmdDestroyGBContext {
uint32 cid;
} __packed
SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
 
/*
* Bind a guest-backed context.
2259,8 → 2252,7
uint32 cid;
SVGAMobId mobid;
uint32 validContents;
} __packed
SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
 
/*
* Readback a guest-backed context.
2270,8 → 2262,7
typedef
struct SVGA3dCmdReadbackGBContext {
uint32 cid;
} __packed
SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
 
/*
* Invalidate a guest-backed context.
2279,8 → 2270,7
typedef
struct SVGA3dCmdInvalidateGBContext {
uint32 cid;
} __packed
SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
 
/*
* Define a guest-backed shader.
2291,8 → 2281,7
uint32 shid;
SVGA3dShaderType type;
uint32 sizeInBytes;
} __packed
SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
 
/*
* Bind a guest-backed shader.
2302,8 → 2291,7
uint32 shid;
SVGAMobId mobid;
uint32 offsetInBytes;
} __packed
SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
 
/*
* Destroy a guest-backed shader.
2311,8 → 2299,7
 
typedef struct SVGA3dCmdDestroyGBShader {
uint32 shid;
} __packed
SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
 
typedef
struct {
2327,8 → 2314,7
* Note that FLOAT and INT constants are 4-dwords in length, while
* BOOL constants are 1-dword in length.
*/
} __packed
SVGA3dCmdSetGBShaderConstInline;
} SVGA3dCmdSetGBShaderConstInline;
/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
 
typedef
2335,8 → 2321,7
struct {
uint32 cid;
SVGA3dQueryType type;
} __packed
SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
 
typedef
struct {
2344,8 → 2329,7
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
} __packed
SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
 
 
/*
2362,8 → 2346,7
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
} __packed
SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
 
typedef
struct {
2370,7 → 2353,7
SVGAMobId mobid;
uint32 fbOffset;
uint32 initalized;
} __packed
}
SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
 
typedef
2377,7 → 2360,7
struct {
SVGAMobId mobid;
uint32 gartOffset;
} __packed
}
SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
 
 
2385,7 → 2368,7
struct {
uint32 gartOffset;
uint32 numPages;
} __packed
}
SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
 
 
2402,13 → 2385,13
int32 xRoot;
int32 yRoot;
uint32 flags;
} __packed
}
SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
 
typedef
struct {
uint32 stid;
} __packed
}
SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
 
typedef
2415,7 → 2398,7
struct {
uint32 stid;
SVGA3dSurfaceImageId image;
} __packed
}
SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
 
typedef
2422,7 → 2405,7
struct {
uint32 stid;
SVGA3dBox box;
} __packed
}
SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
 
/*
2600,28 → 2583,4
float f;
} SVGA3dDevCapResult;
 
typedef enum {
SVGA3DCAPS_RECORD_UNKNOWN = 0,
SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
} SVGA3dCapsRecordType;
 
typedef
struct SVGA3dCapsRecordHeader {
uint32 length;
SVGA3dCapsRecordType type;
}
SVGA3dCapsRecordHeader;
 
typedef
struct SVGA3dCapsRecord {
SVGA3dCapsRecordHeader header;
uint32 data[1];
}
SVGA3dCapsRecord;
 
 
typedef uint32 SVGA3dCapPair[2];
 
#endif /* _SVGA3D_REG_H_ */
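All of the "} __packed" versus "} __attribute__((__packed__))" pairs in this header are the same attribute under two spellings: __packed is the kernel's shorthand macro for the GCC attribute. Both forms force byte-exact, unpadded layout, which matters because these structures are copied verbatim into the SVGA command FIFO. In outline:

/* The kernel shorthand expands to the GCC attribute: */
#define __packed __attribute__((__packed__))

/* so both spellings seen above produce an unpadded layout, e.g.: */
typedef struct {
    uint32 sid;
} __packed SVGA3dCmdExampleSketch;   /* illustrative name only */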
/drivers/video/drm/vmwgfx/svga_reg.h
169,17 → 169,10
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
SVGA_REG_CMD_PREPEND_LOW = 53,
SVGA_REG_CMD_PREPEND_HIGH = 54,
SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
SVGA_REG_MOB_MAX_SIZE = 57,
SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
SVGA_REG_TOP = 53, /* Must be 1 more than the last register */
 
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
/drivers/video/drm/vmwgfx/vmwgfx_context.c
33,12 → 33,11
struct ttm_base_object base;
struct vmw_resource res;
struct vmw_ctx_binding_state cbs;
struct vmw_cmdbuf_res_manager *man;
};
 
 
 
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
 
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
51,11 → 50,9
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;
 
104,8 → 101,7
 
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
 
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
115,11 → 111,7
 
if (res->func->destroy == vmw_gb_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
(void) vmw_context_binding_state_kill(&uctx->cbs);
(void) vmw_gb_context_destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
154,17 → 146,14
ret = vmw_resource_init(dev_priv, res, true,
res_free, &vmw_gb_context_func);
res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
if (unlikely(ret != 0))
goto out_err;
 
if (dev_priv->has_mob) {
uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
if (unlikely(IS_ERR(uctx->man))) {
ret = PTR_ERR(uctx->man);
uctx->man = NULL;
goto out_err;
if (unlikely(ret != 0)) {
if (res_free)
res_free(res);
else
kfree(res);
return ret;
}
}
 
memset(&uctx->cbs, 0, sizeof(uctx->cbs));
INIT_LIST_HEAD(&uctx->cbs.list);
171,13 → 160,6
 
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
 
out_err:
if (res_free)
res_free(res);
else
kfree(res);
return ret;
}
 
static int vmw_context_init(struct vmw_private *dev_priv,
346,7 → 328,7
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_state_scrub(&uctx->cbs);
vmw_context_binding_state_kill(&uctx->cbs);
 
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
396,7 → 378,11
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBContext body;
} *cmd;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
 
BUG_ON(!list_empty(&uctx->cbs.list));
 
if (likely(res->id == -1))
return 0;
 
475,6 → 461,7
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
 
484,10 → 471,9
*/
 
if (unlikely(vmw_user_context_size == 0))
vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
534,7 → 520,7
out_err:
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
ttm_read_unlock(&vmaster->lock);
return ret;
 
}
544,9 → 530,8
* vmw_context_scrub_shader - scrub a shader binding from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
565,7 → 550,7
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.shader_type;
cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.shid = SVGA3D_INVALID_ID;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
576,10 → 561,8
* from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*/
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
bool rebind)
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
598,7 → 581,7
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.rt_type;
cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.sid = SVGA3D_INVALID_ID;
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
610,13 → 593,11
* vmw_context_scrub_texture - scrub a texture binding from a context.
*
* @bi: single binding information.
* @rebind: Whether to issue a bind instead of scrub command.
*
* TODO: Possibly complement this function with a function that takes
* a list of texture bindings and combines them to a single command.
*/
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
bool rebind)
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
640,7 → 621,7
cmd->body.c.cid = bi->ctx->id;
cmd->body.s1.stage = bi->i1.texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
return 0;
713,7 → 694,6
vmw_context_binding_drop(loc);
 
loc->bi = *bi;
loc->bi.scrubbed = false;
list_add_tail(&loc->ctx_list, &cbs->list);
INIT_LIST_HEAD(&loc->res_list);
 
749,12 → 729,13
if (loc->bi.ctx != NULL)
vmw_context_binding_drop(loc);
 
if (bi->res != NULL) {
loc->bi = *bi;
list_add_tail(&loc->ctx_list, &cbs->list);
if (bi->res != NULL)
list_add_tail(&loc->res_list, &bi->res->binding_head);
else
INIT_LIST_HEAD(&loc->res_list);
}
}
 
/**
* vmw_context_binding_kill - Kill a binding on the device
767,10 → 748,7
*/
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
if (!cb->bi.scrubbed) {
(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
cb->bi.scrubbed = true;
}
(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
vmw_context_binding_drop(cb);
}
 
792,27 → 770,6
}
 
/**
* vmw_context_binding_state_scrub - Scrub all bindings associated with a
* struct vmw_ctx_binding state structure.
*
* @cbs: Pointer to the context binding state tracker.
*
* Emits commands to scrub all bindings associated with the
* context binding state tracker.
*/
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
struct vmw_ctx_binding *entry;
 
list_for_each_entry(entry, &cbs->list, ctx_list) {
if (!entry->bi.scrubbed) {
(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
entry->bi.scrubbed = true;
}
}
}
 
/**
* vmw_context_binding_res_list_kill - Kill all bindings on a
* resource binding list
*
830,27 → 787,6
}
 
/**
* vmw_context_binding_res_list_scrub - Scrub all bindings on a
* resource binding list
*
* @head: list head of resource binding list
*
* Scrub all bindings associated with a specific resource. Typically
* called before the resource is evicted.
*/
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
struct vmw_ctx_binding *entry;
 
list_for_each_entry(entry, head, res_list) {
if (!entry->bi.scrubbed) {
(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
entry->bi.scrubbed = true;
}
}
}
 
/**
* vmw_context_binding_state_transfer - Commit staged binding info
*
* @ctx: Pointer to context to commit the staged binding info to.
869,55 → 805,3
list_for_each_entry_safe(entry, next, &from->list, ctx_list)
vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}
 
/**
* vmw_context_rebind_all - Rebind all scrubbed bindings of a context
*
* @ctx: The context resource
*
* Walks through the context binding list and rebinds all scrubbed
* resources.
*/
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
struct vmw_ctx_binding *entry;
struct vmw_user_context *uctx =
container_of(ctx, struct vmw_user_context, res);
struct vmw_ctx_binding_state *cbs = &uctx->cbs;
int ret;
 
list_for_each_entry(entry, &cbs->list, ctx_list) {
if (likely(!entry->bi.scrubbed))
continue;
 
if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
SVGA3D_INVALID_ID))
continue;
 
ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
if (unlikely(ret != 0))
return ret;
 
entry->bi.scrubbed = false;
}
 
return 0;
}
 
/**
* vmw_context_binding_list - Return a list of context bindings
*
* @ctx: The context resource
*
* Returns the current list of bindings of the given context. Note that
* this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
 
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
return container_of(ctx, struct vmw_user_context, res)->man;
}
/drivers/video/drm/vmwgfx/vmwgfx_dmabuf.c
52,6 → 52,7
struct ttm_placement *placement,
bool interruptible)
{
// struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
int ret;
 
61,7 → 62,7
 
vmw_execbuf_release_pinned_bo(dev_priv);
 
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
 
94,6 → 95,7
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
{
// struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement *placement;
int ret;
105,7 → 107,7
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
 
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err;
 
196,6 → 198,7
struct vmw_dma_buffer *buf,
bool pin, bool interruptible)
{
// struct vmw_master *vmaster = dev_priv->active_master;
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
int ret = 0;
212,7 → 215,7
 
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
if (unlikely(ret != 0))
goto err_unlock;
 
/drivers/video/drm/vmwgfx/vmwgfx_drv.h
36,15 → 36,15
//#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
//#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
//#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
 
#define VMWGFX_DRIVER_DATE "20140704"
#define VMWGFX_DRIVER_DATE "20121114"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 6
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
89,12 → 89,13
return v;
}
 
struct ttm_lock{};
struct ww_acquire_ctx{};
 
struct vmw_fpriv {
// struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
bool gb_aware;
};
 
struct vmw_dma_buffer {
136,10 → 137,6
void (*hw_destroy) (struct vmw_resource *res);
};
 
 
/*
* Resources that are managed using ioctls.
*/
enum vmw_res_type {
vmw_res_context,
vmw_res_surface,
148,15 → 145,6
vmw_res_max
};
 
/*
* Resources that are managed using command streams.
*/
enum vmw_cmdbuf_res_type {
vmw_cmdbuf_res_compat_shader
};
 
struct vmw_cmdbuf_res_manager;
 
struct vmw_cursor_snooper {
struct drm_crtc *crtc;
size_t age;
184,8 → 172,8
 
struct vmw_marker_queue {
struct list_head head;
u64 lag;
u64 lag_time;
struct timespec lag;
struct timespec lag_time;
spinlock_t lock;
};
 
301,7 → 289,6
struct vmw_resource *ctx;
struct vmw_resource *res;
enum vmw_ctx_binding_type bt;
bool scrubbed;
union {
SVGA3dShaderType shader_type;
SVGA3dRenderTargetType rt_type;
348,7 → 335,7
struct drm_open_hash res_ht;
bool res_ht_initialized;
bool kernel; /**< is the called made from the kernel */
struct vmw_fpriv *fp;
struct ttm_object_file *tfile;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
366,7 → 353,6
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
struct list_head staged_cmd_res;
};
 
struct vmw_legacy_display;
411,7 → 397,6
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t max_mob_pages;
uint32_t max_mob_size;
uint32_t memory_size;
bool has_gmr;
bool has_mob;
512,11 → 497,6
uint32_t num_3d_resources;
 
/*
* Replace this with an rwsem as soon as we have down_xx_interruptible()
*/
struct ttm_lock reservation_sem;
 
/*
* Query processing. These members
* are protected by the cmdbuf mutex.
*/
606,8 → 586,6
 
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
982,9 → 960,6
vmw_context_binding_state_transfer(struct vmw_resource *res,
struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 
/*
* Surface management - vmwgfx_surface.c
1004,27 → 979,6
*/
 
extern const struct vmw_user_resource_conv *user_shader_converter;
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
struct vmw_resource *res,
struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
enum vmw_cmdbuf_res_type res_type,
u32 user_key,
struct list_head *list);
 
 
/**
* Inline helper functions
*/
/drivers/video/drm/vmwgfx/vmwgfx_execbuf.c
114,10 → 114,8
* persistent context binding tracker.
*/
if (unlikely(val->staged_bindings)) {
if (!backoff) {
vmw_context_binding_state_transfer
(val->res, val->staged_bindings);
}
kfree(val->staged_bindings);
val->staged_bindings = NULL;
}
180,44 → 178,6
}
 
/**
* vmw_resource_context_res_add - Put resources previously bound to a context on
* the validation list
*
* @dev_priv: Pointer to a device private structure
* @sw_context: Pointer to a software context used for this command submission
* @ctx: Pointer to the context resource
*
* This function puts all resources that were previously bound to @ctx on
* the resource validation list. This is part of the context state reemission
*/
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource *ctx)
{
struct list_head *binding_list;
struct vmw_ctx_binding *entry;
int ret = 0;
struct vmw_resource *res;
 
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
 
list_for_each_entry(entry, binding_list, ctx_list) {
res = vmw_resource_reference_unless_doomed(entry->bi.res);
if (unlikely(res == NULL))
continue;
 
ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
}
 
mutex_unlock(&dev_priv->binding_mutex);
return ret;
}
 
/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
273,13 → 233,9
{
struct vmw_resource_relocation *rel;
 
list_for_each_entry(rel, list, head) {
if (likely(rel->res != NULL))
list_for_each_entry(rel, list, head)
cb[rel->offset] = rel->res->id;
else
cb[rel->offset] = SVGA_3D_CMD_NOP;
}
}
 
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
422,72 → 378,7
return 0;
}
 
 
/**
* vmw_cmd_res_reloc_add - Add a resource to a software context's
* relocation- and validation lists.
*
* @dev_priv: Pointer to a struct vmw_private identifying the device.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @id_loc: Pointer to where the id that needs translation is located.
* @res: Valid pointer to a struct vmw_resource.
* @p_val: If non null, a pointer to the struct vmw_resource_validate_node
* used for this resource is returned here.
*/
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
uint32_t *id_loc,
struct vmw_resource *res,
struct vmw_resource_val_node **p_val)
{
int ret;
struct vmw_resource_val_node *node;
 
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_err;
 
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
goto out_err;
 
if (res_type == vmw_res_context && dev_priv->has_mob &&
node->first_usage) {
 
/*
* Put contexts first on the list to be able to exit
* list traversal for contexts early.
*/
list_del(&node->head);
list_add(&node->head, &sw_context->resource_list);
 
ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
goto out_err;
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
goto out_err;
}
INIT_LIST_HEAD(&node->staged_bindings->list);
}
 
if (p_val)
*p_val = node;
 
out_err:
return ret;
}
 
 
/**
* vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
495,17 → 386,14
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visisble type specific information.
* @id_loc: Pointer to the location in the command buffer currently being
* @id: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validalidation node. Populated
* on exit.
*/
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
uint32_t *id,
struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
514,7 → 402,7
struct vmw_resource_val_node *node;
int ret;
 
if (*id_loc == SVGA3D_INVALID_ID) {
if (*id == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
529,7 → 417,7
* resource
*/
 
if (likely(rcache->valid && *id_loc == rcache->handle)) {
if (likely(rcache->valid && *id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
 
rcache->node->first_usage = false;
538,33 → 426,50
 
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
id_loc - sw_context->buf_start);
id - sw_context->buf_start);
}
 
ret = vmw_user_resource_lookup_handle(dev_priv,
sw_context->fp->tfile,
*id_loc,
sw_context->tfile,
*id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
(unsigned) *id_loc);
// dump_stack();
(unsigned) *id);
// dump_stack();
return ret;
}
 
rcache->valid = true;
rcache->res = res;
rcache->handle = *id_loc;
rcache->handle = *id;
 
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
res, &node);
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
id - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
 
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
 
rcache->node = node;
if (p_val)
*p_val = node;
 
if (node->first_usage && res_type == vmw_res_context) {
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
goto out_no_reloc;
}
INIT_LIST_HEAD(&node->staged_bindings->list);
}
 
vmw_resource_unreference(&res);
return 0;
 
576,34 → 481,6
}
 
/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
* @sw_context: Pointer to the software context.
*
* Rebind context binding points that have been scrubbed because of eviction.
*/
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
 
list_for_each_entry(val, &sw_context->resource_list, head) {
if (unlikely(!val->staged_bindings))
break;
 
ret = vmw_context_rebind_all(val->res);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
}
 
return 0;
}
 
/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
619,7 → 496,7
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
uint32_t cid;
__le32 cid;
} *cmd;
 
cmd = container_of(header, struct vmw_cid_cmd, header);
890,7 → 767,7
struct vmw_relocation *reloc;
int ret;
 
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
return -EINVAL;
951,7 → 828,7
struct vmw_relocation *reloc;
int ret;
 
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
1231,19 → 1108,8
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
SVGA3dCmdSurfaceDMASuffix *suffix;
uint32_t bo_size;
 
cmd = container_of(header, struct vmw_dma_cmd, header);
suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
header->size - sizeof(*suffix));
 
/* Make sure device and verifier stays in sync. */
if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
DRM_ERROR("Invalid DMA suffix size.\n");
return -EINVAL;
}
 
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
1250,17 → 1116,6
if (unlikely(ret != 0))
return ret;
 
/* Make sure DMA doesn't cross BO boundaries. */
bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
DRM_ERROR("Invalid DMA offset.\n");
return -EINVAL;
}
 
bo_size -= cmd->dma.guest.ptr.offset;
if (unlikely(suffix->maximumOffset > bo_size))
suffix->maximumOffset = bo_size;
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
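The larger side of this hunk is a bounds-checking pass over SVGA_3D_CMD_SURFACE_DMA: every such command carries a trailing SVGA3dCmdSurfaceDMASuffix, and the added code rejects a suffix whose size field disagrees with the structure, then clamps maximumOffset so the DMA cannot run past the backing buffer object. Condensed from the code above:

/* Condensed form of the added SURFACE_DMA validation. */
suffix = (SVGA3dCmdSurfaceDMASuffix *)
         ((unsigned long)&cmd->dma + header->size - sizeof(*suffix));
if (suffix->suffixSize != sizeof(*suffix))
    return -EINVAL;                     /* device/verifier out of sync */

bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
if (cmd->dma.guest.ptr.offset > bo_size)
    return -EINVAL;                     /* guest offset beyond the BO */
if (suffix->maximumOffset > bo_size - cmd->dma.guest.ptr.offset)
    suffix->maximumOffset = bo_size - cmd->dma.guest.ptr.offset;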
1623,7 → 1478,6
&cmd->body.sid, NULL);
}
 
#if 0
/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
1640,9 → 1494,7
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
struct vmw_resource_val_node *ctx_node, *res_node = NULL;
struct vmw_ctx_bindinfo bi;
struct vmw_resource *res = NULL;
struct vmw_resource_val_node *ctx_node;
int ret;
 
cmd = container_of(header, struct vmw_set_shader_cmd,
1654,34 → 1506,15
if (unlikely(ret != 0))
return ret;
 
if (!dev_priv->has_mob)
return 0;
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo bi;
struct vmw_resource_val_node *res_node;
 
if (cmd->body.shid != SVGA3D_INVALID_ID) {
res = vmw_compat_shader_lookup
(vmw_context_res_man(ctx_node->res),
cmd->body.shid,
cmd->body.type);
 
if (!IS_ERR(res)) {
ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
vmw_res_shader,
&cmd->body.shid, res,
&res_node);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
return ret;
}
}
 
if (!res_node) {
ret = vmw_cmd_res_check(dev_priv, sw_context,
vmw_res_shader,
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
user_shader_converter,
&cmd->body.shid, &res_node);
if (unlikely(ret != 0))
return ret;
}
 
bi.ctx = ctx_node->res;
bi.res = res_node ? res_node->res : NULL;
1689,42 → 1522,10
bi.i1.shader_type = cmd->body.type;
return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
#endif
 
/**
* vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_set_shader_const_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetShaderConst body;
} *cmd;
int ret;
 
cmd = container_of(header, struct vmw_set_shader_const_cmd,
header);
 
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
NULL);
if (unlikely(ret != 0))
return ret;
 
if (dev_priv->has_mob)
header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
 
return 0;
}
 
#if 0
/**
* vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
* command
1750,7 → 1551,6
&cmd->body.shid, &cmd->body.mobid,
cmd->body.offsetInBytes);
}
#endif
 
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
1795,7 → 1595,7
return 0;
}
 
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1834,14 → 1634,14
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
false, false, false),
// VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
// true, false, false),
// VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
// true, false, false),
// VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
// true, false, false),
// VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
// true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
true, true, false),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
true, true, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
true, true, false),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1926,8 → 1726,8
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
false, false, true),
// VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
// true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
1992,9 → 1792,6
goto out_invalid;
 
entry = &vmw_cmd_entries[cmd_id];
if (unlikely(!entry->func))
goto out_invalid;
 
if (unlikely(!entry->user_allow && !sw_context->kernel))
goto out_privileged;
 
2332,8 → 2129,6
}
}
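
/*
 * A minimal sketch of the dispatch pattern behind vmw_cmd_entries (the
 * names below are illustrative, not the driver's): each command id
 * indexes a fixed table of verifier callbacks, and an out-of-range id,
 * a NULL callback, or a privileged entry seen from user space rejects
 * the command before it ever reaches the device.
 */
#include <stddef.h>
#include <stdbool.h>

typedef int (*verify_fn)(void *priv, void *ctx, void *header);

struct cmd_entry {
    verify_fn func;        /* verifier callback, NULL = unsupported */
    bool user_allow;       /* may be submitted from user space      */
};

#define CMD_MAX 8
/* Real tables fill slots with designated initializers, e.g.
 * [MY_CMD_ID] = { my_cmd_check, true }; untouched slots stay NULL. */
static const struct cmd_entry cmd_table[CMD_MAX];

static int dispatch(unsigned cmd_id, bool kernel_client,
                    void *priv, void *ctx, void *header)
{
    const struct cmd_entry *entry;

    if (cmd_id >= CMD_MAX)
        return -1;                        /* invalid command id */
    entry = &cmd_table[cmd_id];
    if (entry->func == NULL)
        return -1;                        /* hole in the table  */
    if (!entry->user_allow && !kernel_client)
        return -1;                        /* privileged command */
    return entry->func(priv, ctx, header);
}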
 
 
 
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
2377,7 → 2172,7
} else */
sw_context->kernel = true;
 
sw_context->fp = vmw_fpriv(file_priv);
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
2394,17 → 2189,16
goto out_unlock;
sw_context->res_ht_initialized = true;
}
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
 
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
goto out_err_nores;
goto out_err;
 
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
goto out_err_nores;
goto out_err;
 
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
2432,12 → 2226,6
goto out_err;
}
 
if (dev_priv->has_mob) {
ret = vmw_rebind_contexts(sw_context);
if (unlikely(ret != 0))
goto out_unlock_binding;
}
 
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
2489,7 → 2277,6
}
 
list_splice_init(&sw_context->resource_list, &resource_list);
vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
 
/*
2503,11 → 2290,10
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
out_err:
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
2516,7 → 2302,6
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
 
/*
2673,6 → 2458,7
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
// struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
/*
2689,7 → 2475,7
return -EINVAL;
}
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
// ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
2705,6 → 2491,6
// vmw_kms_cursor_post_execbuf(dev_priv);
 
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
// ttm_read_unlock(&vmaster->lock);
return ret;
}
/drivers/video/drm/vmwgfx/vmwgfx_fence.c
701,7 → 701,7
 
if (!arg->cookie_valid) {
arg->cookie_valid = 1;
arg->kernel_cookie = jiffies + wait_timeout;
arg->kernel_cookie = GetTimerTicks() + wait_timeout;
}
 
base = ttm_base_object_lookup(tfile, arg->handle);
714,7 → 714,7
 
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 
timeout = jiffies;
timeout = GetTimerTicks();
if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
0 : -EBUSY);
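
/*
 * A small runnable illustration (plain C, not driver code) of the cookie
 * scheme above: the first call stamps an absolute deadline in timer
 * ticks, and later calls compare against it with the wraparound-safe
 * signed subtraction that time_after_eq() is built on. 32-bit types are
 * used here so the wrap is deterministic.
 */
#include <stdint.h>
#include <stdio.h>

#define time_after_eq32(a, b)  ((int32_t)((a) - (b)) >= 0)

int main(void)
{
    uint32_t now      = 0xfffffff0u;    /* tick counter about to wrap */
    uint32_t deadline = now + 0x20;     /* wraps around to 0x10       */

    printf("%d\n", time_after_eq32(now, deadline));        /* 0: not expired */
    printf("%d\n", time_after_eq32(now + 0x30, deadline)); /* 1: expired     */
    return 0;
}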
/drivers/video/drm/vmwgfx/vmwgfx_gmrid_manager.c
47,7 → 47,6
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
uint32_t flags,
struct ttm_mem_reg *mem)
{
struct vmwgfx_gmrid_man *gman =
/drivers/video/drm/vmwgfx/vmwgfx_irq.c
128,7 → 128,7
uint32_t count = 0;
uint32_t signal_seq;
int ret;
unsigned long end_jiffies = jiffies + timeout;
unsigned long end_jiffies = GetTimerTicks() + timeout;
bool (*wait_condition)(struct vmw_private *, uint32_t);
DEFINE_WAIT(__wait);
 
150,7 → 150,7
// TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (wait_condition(dev_priv, seqno))
break;
if (time_after_eq(jiffies, end_jiffies)) {
if (time_after_eq(GetTimerTicks(), end_jiffies)) {
DRM_ERROR("SVGA device lockup.\n");
break;
}
/drivers/video/drm/vmwgfx/vmwgfx_mob.c
134,7 → 134,6
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
 
188,10 → 187,9
 
bo = otable->page_table->pt_bo;
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable "
"takedown.\n");
} else {
if (unlikely(cmd == NULL))
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
 
memset(cmd, 0, sizeof(*cmd));
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
cmd->header.size = sizeof(cmd->body);
201,7 → 199,6
cmd->body.validSizeInBytes = 0;
cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
 
if (bo) {
int ret;
565,12 → 562,11
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for Memory "
"Object unbinding.\n");
} else {
}
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
if (bo) {
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
/drivers/video/drm/vmwgfx/vmwgfx_resource.c
122,7 → 122,7
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
 
ttm_bo_reserve(bo, false, false, false, NULL);
ttm_bo_reserve(bo, false, false, false, 0);
if (!list_empty(&res->mob_head) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
136,12 → 136,8
vmw_dmabuf_unreference(&res->backup);
}
 
if (likely(res->hw_destroy != NULL)) {
if (likely(res->hw_destroy != NULL))
res->hw_destroy(res);
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_res_list_kill(&res->binding_head);
mutex_unlock(&dev_priv->binding_mutex);
}
 
id = res->id;
if (res->res_free != NULL)
422,7 → 418,8
INIT_LIST_HEAD(&vmw_bo->res_list);
 
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
(user) ? ttm_bo_type_device :
ttm_bo_type_kernel, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
535,13 → 532,8
return -EPERM;
 
vmw_user_bo = vmw_user_dma_buffer(bo);
 
/* Check that the caller has opened the object. */
if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
return 0;
 
DRM_ERROR("Could not grant buffer access.\n");
return -EPERM;
return (vmw_user_bo->prime.base.tfile == tfile ||
vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
 
/**
561,7 → 553,7
{
struct ttm_buffer_object *bo = &user_bo->dma.base;
bool existed;
int ret;
int ret=0;
 
if (flags & drm_vmw_synccpu_allow_cs) {
struct ttm_bo_device *bdev = bo->bdev;
679,9 → 671,10
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
struct vmw_dma_buffer *dma_buf;
uint32_t handle;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
698,7 → 691,7
vmw_dmabuf_unreference(&dma_buf);
 
out_no_dmabuf:
ttm_read_unlock(&dev_priv->reservation_sem);
ttm_read_unlock(&vmaster->lock);
 
return ret;
}
813,7 → 806,7
container_of(res, struct vmw_user_stream, stream.res);
struct vmw_private *dev_priv = res->dev_priv;
 
ttm_base_object_kfree(stream, base);
// ttm_base_object_kfree(stream, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_stream_size);
}
877,6 → 870,7
struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
 
/*
887,7 → 881,7
if (unlikely(vmw_user_stream_size == 0))
vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
935,7 → 929,7
out_err:
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
ttm_read_unlock(&vmaster->lock);
return ret;
}
#endif
985,7 → 979,7
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
997,7 → 991,7
 
vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
ttm_read_unlock(&dev_priv->reservation_sem);
ttm_read_unlock(&vmaster->lock);
return ret;
}
#endif
/drivers/video/drm/vmwgfx/vmwgfx_scrn.c
100,7 → 100,7
/**
* Send the fifo command to create a screen.
*/
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
int vmw_sou_fifo_create(struct vmw_private *dev_priv,
struct vmw_screen_object_unit *sou,
uint32_t x, uint32_t y,
struct drm_display_mode *mode)
114,8 → 114,10
SVGAScreenObject obj;
} *cmd;
 
BUG_ON(!sou->buffer);
// BUG_ON(!sou->buffer);
 
ENTER();
 
fifo_size = sizeof(*cmd);
cmd = vmw_fifo_reserve(dev_priv, fifo_size);
/* The hardware has hung, nothing we can do about it here. */
141,7 → 143,10
}
 
/* Ok to assume that buffer is pinned in vram */
vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
// vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
 
cmd->obj.backingStore.ptr.gmrId = SVGA_GMR_FRAMEBUFFER;
cmd->obj.backingStore.ptr.offset = 0;
cmd->obj.backingStore.pitch = mode->hdisplay * 4;
 
vmw_fifo_commit(dev_priv, fifo_size);
148,6 → 153,8
 
sou->defined = true;
 
LEAVE();
 
return 0;
}
 
307,7 → 314,7
 
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->primary->fb = NULL;
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
crtc->enabled = false;
368,7 → 375,7
 
connector->encoder = NULL;
encoder->crtc = NULL;
crtc->primary->fb = NULL;
crtc->fb = NULL;
crtc->x = 0;
crtc->y = 0;
crtc->enabled = false;
381,7 → 388,7
connector->encoder = encoder;
encoder->crtc = crtc;
crtc->mode = *mode;
crtc->primary->fb = fb;
crtc->fb = fb;
crtc->x = set->x;
crtc->y = set->y;
crtc->enabled = true;
440,6 → 447,8
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
ENTER();
 
sou = kzalloc(sizeof(*sou), GFP_KERNEL);
if (!sou)
return -ENOMEM;
467,8 → 476,6
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
 
(void) drm_connector_register(connector);
 
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
 
drm_mode_crtc_set_gamma_size(crtc, 256);
476,7 → 483,7
drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
 
LEAVE();
return 0;
}
 
485,6 → 492,8
struct drm_device *dev = dev_priv->dev;
int i, ret;
 
ENTER();
 
if (dev_priv->sou_priv) {
DRM_INFO("sou system already on\n");
return -EINVAL;
514,6 → 523,7
 
DRM_INFO("Screen objects system initialized\n");
 
LEAVE();
return 0;
 
err_vblank_cleanup:
569,7 → 579,7
BUG_ON(!sou->base.is_implicit);
 
dev_priv->sou_priv->implicit_fb =
vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
vmw_framebuffer_to_vfb(sou->base.crtc.fb);
}
 
#include "bitmap.h"
629,6 → 639,8
 
bool ret = false;
 
ENTER();
 
// dbgprintf("width %d height %d vrefresh %d\n",
// reqmode->width, reqmode->height, reqmode->freq);
 
706,7 → 718,6
vmw_write(dev_priv,SVGA_REG_WIDTH, mode->hdisplay);
vmw_write(dev_priv,SVGA_REG_HEIGHT, mode->vdisplay);
vmw_write(dev_priv,SVGA_REG_BITS_PER_PIXEL, 32);
os_display->select_cursor(os_display->cursor);
ret = 0;
#endif
if (ret == 0)
726,5 → 737,6
os_display->width, os_display->height, crtc);
}
 
LEAVE();
return ret;
};
/drivers/video/drm/vmwgfx/vmwgfx_shader.c
29,9 → 29,6
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
 
#define VMW_COMPAT_SHADER_HT_ORDER 12
 
#if 0
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
43,50 → 40,6
struct vmw_shader shader;
};
 
/**
* enum vmw_compat_shader_state - Staging state for compat shaders
*/
enum vmw_compat_shader_state {
VMW_COMPAT_COMMITED,
VMW_COMPAT_ADD,
VMW_COMPAT_DEL
};
 
/**
* struct vmw_compat_shader - Metadata for compat shaders.
*
* @handle: The TTM handle of the guest backed shader.
* @tfile: The struct ttm_object_file the guest backed shader is registered
* with.
* @hash: Hash item for lookup.
* @head: List head for staging lists or the compat shader manager list.
* @state: Staging state.
*
* The structure is protected by the cmdbuf lock.
*/
struct vmw_compat_shader {
u32 handle;
struct ttm_object_file *tfile;
struct drm_hash_item hash;
struct list_head head;
enum vmw_compat_shader_state state;
};
 
/**
* struct vmw_compat_shader_manager - Compat shader manager.
*
* @shaders: Hash table containing staged and committed compat shaders
* @list: List of committed shaders.
* @dev_priv: Pointer to a device private structure.
*
* @shaders and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_compat_shader_manager {
struct drm_open_hash shaders;
struct list_head list;
struct vmw_private *dev_priv;
};
 
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
99,6 → 52,8
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
 
static uint64_t vmw_user_shader_size;
 
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
.base_obj_to_res = vmw_user_shader_base_to_res,
303,7 → 258,7
return 0;
 
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_res_list_scrub(&res->binding_head);
vmw_context_binding_res_list_kill(&res->binding_head);
 
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
341,9 → 296,9
container_of(res, struct vmw_user_shader, shader.res);
struct vmw_private *dev_priv = res->dev_priv;
 
ttm_base_object_kfree(ushader, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
// ttm_base_object_kfree(ushader, base);
// ttm_mem_global_free(vmw_mem_glob(dev_priv),
// vmw_user_shader_size);
}
 
/**
370,84 → 325,18
TTM_REF_USAGE);
}
 
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
struct ttm_object_file *tfile,
u32 *handle)
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
int ret;
 
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by the maximum number of shaders anyway.
*/
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size =
ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
 
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
"creation.\n");
goto out;
}
 
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(ushader == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
goto out;
}
 
res = &ushader->shader.res;
ushader->base.shareable = false;
ushader->base.tfile = NULL;
 
/*
* From here on, the destructor takes over resource freeing.
*/
 
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
offset, shader_type, buffer,
vmw_user_shader_free);
if (unlikely(ret != 0))
goto out;
 
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
&vmw_user_shader_base_release, NULL);
 
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
}
 
if (handle)
*handle = ushader->base.hash.key;
out_err:
vmw_resource_unreference(&res);
out:
return ret;
}
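
/*
 * A stand-alone sketch (illustrative names, not driver code) of the
 * ownership rule stated in the comment above: once the init call has
 * been handed the vmw_user_shader_free destructor, the allocation is
 * owned by the resource refcount, so every later error path drops a
 * reference instead of calling kfree() directly, and all cleanup
 * funnels through one place.
 */
#include <stdlib.h>

struct refobj {
    int refcount;
    void (*destructor)(struct refobj *);
};

static void refobj_put(struct refobj *o)
{
    if (--o->refcount == 0 && o->destructor)
        o->destructor(o);      /* the only place the memory is freed */
}

static void refobj_destroy(struct refobj *o)
{
    free(o);                   /* stands in for vmw_user_shader_free */
}

int main(void)
{
    struct refobj *o = calloc(1, sizeof(*o));
    if (!o)
        return 1;
    o->refcount = 2;                   /* creator + one extra reference */
    o->destructor = refobj_destroy;

    refobj_put(o);                     /* error path: drop, don't free  */
    refobj_put(o);                     /* last ref: destructor runs     */
    return 0;
}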
 
 
#if 0
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_shader *ushader;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_dma_buffer *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
485,164 → 374,70
goto out_bad_arg;
}
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
goto out_bad_arg;
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by the maximum number of shaders anyway.
*/
 
ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
shader_type, tfile, &arg->shader_handle);
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
+ 128;
 
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
vmw_dmabuf_unreference(&buffer);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
}
 
/**
* vmw_compat_shader_id_ok - Check whether a compat shader user key and
* shader type are within valid bounds.
*
* @user_key: User space id of the shader.
* @shader_type: Shader type.
*
* Returns true if valid, false if not.
*/
static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader"
" creation.\n");
goto out_unlock;
}
 
/**
* vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
*
* @user_key: User space id of the shader.
* @shader_type: Shader type.
*
* Returns a hash key suitable for a command buffer managed resource
* manager hash table.
*/
static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
return user_key | (shader_type << 20);
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(ushader == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
goto out_unlock;
}
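
/*
 * A quick stand-alone check (not driver code) of the packing above: with
 * user keys limited to 20 bits and shader types to 4 bits, the two
 * fields pack losslessly into a single hash key and can be split back
 * apart.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t pack_key(uint32_t user_key, uint32_t shader_type)
{
    return user_key | (shader_type << 20);
}

int main(void)
{
    uint32_t key = pack_key(0xABCDE, 3);   /* 0xABCDE < 2^20, type < 16 */

    assert((key & 0xFFFFF) == 0xABCDE);    /* low 20 bits: user key     */
    assert((key >> 20)     == 3);          /* next 4 bits: shader type  */
    return 0;
}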
 
/**
* vmw_compat_shader_remove - Stage a compat shader for removal.
*
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @shader_type: Shader type.
* @list: Caller's list of staged command buffer resource actions.
*/
int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type,
struct list_head *list)
{
if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
res = &ushader->shader.res;
ushader->base.shareable = false;
ushader->base.tfile = NULL;
 
return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
vmw_compat_shader_key(user_key,
shader_type),
list);
}
 
/**
* vmw_compat_shader_add - Create a compat shader and stage it for addition
* as a command buffer managed resource.
*
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @bytecode: Pointer to the bytecode of the shader.
* @shader_type: Shader type.
* @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
* to be created with.
* @list: Caller's list of staged command buffer resource actions.
*
/*
* From here on, the destructor takes over resource freeing.
*/
int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
struct list_head *list)
{
struct vmw_dma_buffer *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
int ret;
struct vmw_resource *res;
 
if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
 
/* Allocate and pin a DMA buffer */
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (unlikely(buf == NULL))
return -ENOMEM;
 
ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
true, vmw_dmabuf_bo_free);
ret = vmw_gb_shader_init(dev_priv, res, arg->size,
arg->offset, shader_type, buffer,
vmw_user_shader_free);
if (unlikely(ret != 0))
goto out;
goto out_unlock;
 
ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
if (unlikely(ret != 0))
goto no_reserve;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
&vmw_user_shader_base_release, NULL);
 
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
&map);
if (unlikely(ret != 0)) {
ttm_bo_unreserve(&buf->base);
goto no_reserve;
vmw_resource_unreference(&tmp);
goto out_err;
}
 
memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
WARN_ON(is_iomem);
arg->shader_handle = ushader->base.hash.key;
out_err:
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&vmaster->lock);
out_bad_arg:
vmw_dmabuf_unreference(&buffer);
 
ttm_bo_kunmap(&map);
ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base);
 
res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
if (unlikely(ret != 0))
goto no_reserve;
 
ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
vmw_compat_shader_key(user_key, shader_type),
res, list);
vmw_resource_unreference(&res);
no_reserve:
vmw_dmabuf_unreference(&buf);
out:
return ret;
}
 
/**
* vmw_compat_shader_lookup - Look up a compat shader
*
* @man: Pointer to the command buffer managed resource manager identifying
* the shader namespace.
* @user_key: The user space id of the shader.
* @shader_type: The shader type.
*
* Returns a refcounted pointer to a struct vmw_resource if the shader was
* found. An error pointer otherwise.
*/
struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
u32 user_key,
SVGA3dShaderType shader_type)
{
if (!vmw_compat_shader_id_ok(user_key, shader_type))
return ERR_PTR(-EINVAL);
 
return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
vmw_compat_shader_key(user_key,
shader_type));
}
#endif
/drivers/video/drm/vmwgfx/vmwgfx_surface.c
36,7 → 36,6
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @size: TTM accounting size for the surface.
* @master: master of the creating client. Used for security check.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
682,6 → 681,7
struct vmw_surface_offset *cur_offset;
uint32_t num_sizes;
uint32_t size;
struct vmw_master *vmaster = vmw_master(file_priv->master);
const struct svga3d_surface_desc *desc;
 
if (unlikely(vmw_user_surface_size == 0))
707,7 → 707,7
return -EINVAL;
}
 
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
 
828,7 → 828,7
rep->sid = user_srf->prime.base.hash.key;
vmw_resource_unreference(&res);
 
ttm_read_unlock(&dev_priv->reservation_sem);
ttm_read_unlock(&vmaster->lock);
return 0;
out_no_copy:
kfree(srf->offsets);
839,8 → 839,7
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
 
ttm_read_unlock(&vmaster->lock);
return ret;
}
 
865,16 → 864,27
struct vmw_user_surface *user_srf;
struct drm_vmw_size __user *user_sizes;
struct ttm_base_object *base;
int ret;
int ret = -EINVAL;
 
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req->handle_type, &base);
if (unlikely(ret != 0))
return ret;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
return -EINVAL;
}
 
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
goto out_bad_resource;
 
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
 
ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_no_reference;
}
 
rep->flags = srf->flags;
rep->format = srf->format;
memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
882,12 → 892,11
rep->size_addr;
 
if (user_sizes)
ret = copy_to_user(user_sizes, &srf->base_size,
sizeof(srf->base_size));
ret = copy_to_user(user_sizes, srf->sizes,
srf->num_sizes * sizeof(*srf->sizes));
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
ret = -EFAULT;
}
out_bad_resource:
/drivers/video/drm/vmwgfx/Makefile.lto
6,18 → 6,19
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRV_TOPDIR = $(CURDIR)/../../..
DRM_TOPDIR = $(CURDIR)/..
 
DRV_INCLUDES = $(DRV_TOPDIR)/include
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux -I$(DRV_INCLUDES)/linux/uapi
-I$(DRV_INCLUDES)/linux
 
CFLAGS_OPT = -Os -fomit-frame-pointer -fno-builtin-printf -mno-stack-arg-probe
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -mno-stack-arg-probe
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto
CFLAGS = -c $(INCLUDES) $(DEFINES) $(CFLAGS_OPT)
 
LIBPATH:= ../../../ddk
LIBPATH:= $(DRV_TOPDIR)/ddk
 
LIBS:= -lddk -lcore -lgcc
 
51,10 → 52,8
vmwgfx_irq.c \
vmwgfx_kms.c \
vmwgfx_marker.c \
vmwgfx_mob.c \
vmwgfx_resource.c \
vmwgfx_scrn.c \
vmwgfx_shader.c \
vmwgfx_surface.c \
vmwgfx_ttm_glue.c \
../hdmi.c \
62,7 → 61,6
../ttm/ttm_bo.c \
../ttm/ttm_bo_manager.c \
../ttm/ttm_execbuf_util.c \
../ttm/ttm_lock.c \
../ttm/ttm_memory.c \
../ttm/ttm_object.c \
../ttm/ttm_page_alloc.c \
92,7 → 90,7
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(SRC_DEP) $(HFILES) vmw.lds Makefile
$(CC) $(CFLAGS_OPT) -fwhole-program -nostdlib -Wl,-L$(LIBPATH),$(LDFLAGS),-T,vmw.lds -o $@ $(NAME_OBJS) $(LIBS)
$(CC) $(CFLAGS_OPT) -fwhole-program -nostdlib -Wl,-L$(LIBPATH),$(LDFLAGS),-T,vmw.lds -o $@ $(NAME_OBJS) libddk.a libcore.a libgcc.a
kpack $@
 
 
103,5 → 101,6
as -o $@ $<
 
 
clean:
-rm -f */*.o
 
 
/drivers/video/drm/vmwgfx/vmwgfx_ttm_glue.c
53,6 → 53,7
 
static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
{
// ttm_mem_global_release(ref->object);
}
 
int vmw_ttm_global_init(struct vmw_private *dev_priv)
/drivers/video/drm/vmwgfx/svga3d_surfacedefs.h
38,11 → 38,8
 
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
#define u64 uint64_t
#define U32_MAX ((u32)~0U)
 
#endif /* __KERNEL__ */
 
707,8 → 704,8
 
static inline u32 clamped_umul32(u32 a, u32 b)
{
u64 tmp = (u64) a*b;
return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
uint64_t tmp = (uint64_t) a*b;
return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
}
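
/*
 * A quick stand-alone check mirroring the helper above: the multiply is
 * widened to 64 bits first, so a product that would overflow a u32
 * clamps to U32_MAX instead of silently wrapping.
 */
#include <stdint.h>
#include <assert.h>

static uint32_t clamped_umul32_demo(uint32_t a, uint32_t b)
{
    uint64_t tmp = (uint64_t)a * b;
    return (tmp > UINT32_MAX) ? UINT32_MAX : (uint32_t)tmp;
}

int main(void)
{
    assert(clamped_umul32_demo(1000, 1000) == 1000000u);         /* exact  */
    assert(clamped_umul32_demo(0x10000, 0x10000) == UINT32_MAX); /* clamps */
    return 0;
}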
 
static inline const struct svga3d_surface_desc *
837,7 → 834,7
bool cubemap)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
u64 total_size = 0;
u32 total_size = 0;
u32 mip;
 
for (mip = 0; mip < num_mip_levels; mip++) {
850,7 → 847,7
if (cubemap)
total_size *= SVGA3D_MAX_SURFACE_FACES;
 
return (u32) min_t(u64, total_size, (u64) U32_MAX);
return total_size;
}
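
/*
 * A worked example (simplified, not the driver routine) of the mip-chain
 * accounting above: each level halves every dimension, clamped at 1, and
 * the per-level byte sizes are summed. For a 256x256 32-bpp surface with
 * 3 mip levels:
 *   level 0: 256*256*4 = 262144 bytes
 *   level 1: 128*128*4 =  65536 bytes
 *   level 2:  64* 64*4 =  16384 bytes   -> total 344064 bytes
 */
#include <stdint.h>
#include <assert.h>

static uint32_t mip_chain_bytes(uint32_t w, uint32_t h,
                                uint32_t bpp, uint32_t levels)
{
    uint64_t total = 0;
    uint32_t mip;

    for (mip = 0; mip < levels; mip++) {
        total += (uint64_t)w * h * (bpp / 8);
        w = w > 1 ? w / 2 : 1;           /* max_t(u32, w >> 1, 1) */
        h = h > 1 ? h / 2 : 1;
    }
    return total > UINT32_MAX ? UINT32_MAX : (uint32_t)total;
}

int main(void)
{
    assert(mip_chain_bytes(256, 256, 32, 3) == 344064u);
    return 0;
}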
 
 
/drivers/video/drm/vmwgfx/vmwgfx_marker.c
27,18 → 27,19
 
 
#include "vmwgfx_drv.h"
#include <linux/time.h>
 
struct vmw_marker {
struct list_head head;
uint32_t seqno;
u64 submitted;
struct timespec submitted;
};
 
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
INIT_LIST_HEAD(&queue->head);
queue->lag = 0;
queue->lag_time = ktime_get_raw_ns();
queue->lag = ns_to_timespec(0);
// getrawmonotonic(&queue->lag_time);
spin_lock_init(&queue->lock);
}
 
62,7 → 63,7
return -ENOMEM;
 
marker->seqno = seqno;
marker->submitted = ktime_get_raw_ns();
// getrawmonotonic(&marker->submitted);
spin_lock(&queue->lock);
list_add_tail(&marker->head, &queue->head);
spin_unlock(&queue->lock);
74,14 → 75,14
uint32_t signaled_seqno)
{
struct vmw_marker *marker, *next;
struct timespec now;
bool updated = false;
u64 now;
 
spin_lock(&queue->lock);
now = ktime_get_raw_ns();
// getrawmonotonic(&now);
 
if (list_empty(&queue->head)) {
queue->lag = 0;
// queue->lag = ns_to_timespec(0);
queue->lag_time = now;
updated = true;
goto out_unlock;
91,7 → 92,7
if (signaled_seqno - marker->seqno > (1 << 30))
continue;
 
queue->lag = now - marker->submitted;
// queue->lag = timespec_sub(now, marker->submitted);
queue->lag_time = now;
updated = true;
list_del(&marker->head);
104,13 → 105,27
return (updated) ? 0 : -EBUSY;
}
 
static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
static struct timespec vmw_timespec_add(struct timespec t1,
struct timespec t2)
{
u64 now;
t1.tv_sec += t2.tv_sec;
t1.tv_nsec += t2.tv_nsec;
if (t1.tv_nsec >= 1000000000L) {
t1.tv_sec += 1;
t1.tv_nsec -= 1000000000L;
}
 
return t1;
}
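
/*
 * A tiny stand-alone check of the carry normalization above (inputs are
 * assumed already normalized, i.e. tv_nsec < 1e9, so a single carry is
 * enough): 1.7 s + 0.6 s comes out as 2.3 s, not 1 s + 1.3e9 ns.
 */
#include <assert.h>
#include <time.h>

static struct timespec ts_add(struct timespec t1, struct timespec t2)
{
    t1.tv_sec  += t2.tv_sec;
    t1.tv_nsec += t2.tv_nsec;
    if (t1.tv_nsec >= 1000000000L) {
        t1.tv_sec  += 1;
        t1.tv_nsec -= 1000000000L;
    }
    return t1;
}

int main(void)
{
    struct timespec a = { 1, 700000000L };  /* 1.7 s */
    struct timespec b = { 0, 600000000L };  /* 0.6 s */
    struct timespec r = ts_add(a, b);

    assert(r.tv_sec == 2 && r.tv_nsec == 300000000L);  /* 2.3 s */
    return 0;
}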
 
static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
{
struct timespec now;
 
spin_lock(&queue->lock);
now = ktime_get_raw_ns();
queue->lag += now - queue->lag_time;
// getrawmonotonic(&now);
// queue->lag = vmw_timespec_add(queue->lag,
// timespec_sub(now, queue->lag_time));
queue->lag_time = now;
spin_unlock(&queue->lock);
return queue->lag;
120,9 → 135,11
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
uint32_t us)
{
u64 cond = (u64) us * NSEC_PER_USEC;
struct timespec lag, cond;
 
return vmw_fifo_lag(queue) <= cond;
cond = ns_to_timespec((s64) us * 1000);
lag = vmw_fifo_lag(queue);
return (timespec_compare(&lag, &cond) < 1);
}
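
/*
 * A quick stand-alone check of the unit conversion above (plain C rather
 * than the kernel helpers, which are assumed to behave the same way):
 * microseconds scale by 1000 into nanoseconds, and the threshold test is
 * a lexicographic (tv_sec, tv_nsec) comparison.
 */
#include <assert.h>
#include <time.h>

static struct timespec us_to_ts(long long us)
{
    struct timespec t;
    t.tv_sec  = (time_t)(us / 1000000);
    t.tv_nsec = (long)((us % 1000000) * 1000);
    return t;
}

static int ts_cmp(const struct timespec *a, const struct timespec *b)
{
    if (a->tv_sec != b->tv_sec)
        return a->tv_sec < b->tv_sec ? -1 : 1;
    if (a->tv_nsec != b->tv_nsec)
        return a->tv_nsec < b->tv_nsec ? -1 : 1;
    return 0;
}

int main(void)
{
    struct timespec cond = us_to_ts(1500);   /* 1.5 ms threshold */
    struct timespec lag  = { 0, 900000L };   /* 0.9 ms of lag    */

    assert(cond.tv_sec == 0 && cond.tv_nsec == 1500000L);
    assert(ts_cmp(&lag, &cond) < 1);         /* lag is under the limit */
    return 0;
}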
 
int vmw_wait_lag(struct vmw_private *dev_priv,
/drivers/video/drm/ttm/ttm_lock.c
File deleted
/drivers/video/drm/ttm/ttm_bo.c
39,35 → 39,92
#include <linux/mm.h>
#include <linux/module.h>
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
 
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
int i;
 
for (i = 0; i <= TTM_PL_PRIV5; i++)
if (flags & (1 << i)) {
*mem_type = i;
mutex_lock(&man->io_reserve_mutex);
return 0;
}
return -EINVAL;
 
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
if (likely(man->io_reserve_fastpath))
return;
 
mutex_unlock(&man->io_reserve_mutex);
}
 
 
#if 0
static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 
pr_err(" has_type: %d\n", man->has_type);
pr_err(" use_type: %d\n", man->use_type);
pr_err(" flags: 0x%08X\n", man->flags);
pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
pr_err(" size: %llu\n", man->size);
pr_err(" available_caching: 0x%08X\n", man->available_caching);
pr_err(" default_caching: 0x%08X\n", man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
(*man->func->debug)(man, TTM_PFX);
}
 
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
int i, ret, mem_type;
 
pr_err("No space for %p (%lu pages, %luK, %luM)\n",
bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return;
pr_err(" placement[%d]=0x%08X (%d)\n",
i, placement->placement[i], mem_type);
ttm_mem_type_debug(bo->bdev, mem_type);
}
}
 
static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
{
struct ttm_bo_global *glob =
container_of(kobj, struct ttm_bo_global, kobj);
 
return snprintf(buffer, PAGE_SIZE, "%lu\n",
(unsigned long) atomic_read(&glob->bo_count));
}
 
static struct attribute *ttm_bo_global_attrs[] = {
&ttm_bo_count,
NULL
};
 
static const struct sysfs_ops ttm_bo_global_ops = {
.show = &ttm_bo_global_show
};
 
static struct kobj_type ttm_bo_glob_kobj_type = {
.release = &ttm_bo_global_kobj_release,
.sysfs_ops = &ttm_bo_global_ops,
.default_attrs = ttm_bo_global_attrs
};
#endif
 
 
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
return 1 << (type);
91,14 → 148,12
if (bo->ttm)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
if (bo->resv == &bo->ttm_resv)
reservation_object_fini(&bo->ttm_resv);
mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
else {
kfree(bo);
}
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
 
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
106,7 → 161,7
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
 
lockdep_assert_held(&bo->resv->lock.base);
// BUG_ON(!ttm_bo_is_reserved(bo));
 
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 
213,6 → 268,7
return ret;
}
 
#if 0
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
bool evict, bool interruptible,
292,11 → 348,9
 
moved:
if (bo->evicted) {
if (bdev->driver->invalidate_caches) {
ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
if (ret)
pr_err("Can not flush read caches\n");
}
bo->evicted = false;
}
 
353,7 → 407,7
int ret;
 
spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, false, NULL);
ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
384,7 → 438,7
ttm_bo_add_to_lru(bo);
}
 
__ttm_bo_unreserve(bo);
ww_mutex_unlock(&bo->resv->lock);
}
 
kref_get(&bo->list_kref);
395,8 → 449,8
driver->sync_obj_flush(sync_obj);
driver->sync_obj_unref(&sync_obj);
}
// schedule_delayed_work(&bdev->wq,
// ((HZ / 100) < 1) ? 1 : HZ / 100);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
}
 
/**
435,7 → 489,7
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
 
__ttm_bo_unreserve(bo);
ww_mutex_unlock(&bo->resv->lock);
spin_unlock(&glob->lru_lock);
 
ret = driver->sync_obj_wait(sync_obj, false, interruptible);
455,7 → 509,7
return ret;
 
spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, false, NULL);
ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
/*
* We raced, and lost, someone else holds the reservation now,
473,7 → 527,7
spin_unlock(&bdev->fence_lock);
 
if (ret || unlikely(list_empty(&bo->ddestroy))) {
__ttm_bo_unreserve(bo);
ww_mutex_unlock(&bo->resv->lock);
spin_unlock(&glob->lru_lock);
return ret;
}
518,11 → 572,11
kref_get(&nentry->list_kref);
}
 
ret = __ttm_bo_reserve(entry, false, true, false, NULL);
ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
if (remove_all && ret) {
spin_unlock(&glob->lru_lock);
ret = __ttm_bo_reserve(entry, false, false,
false, NULL);
ret = ttm_bo_reserve_nolru(entry, false, false,
false, 0);
spin_lock(&glob->lru_lock);
}
 
561,6 → 615,7
((HZ / 100) < 1) ? 1 : HZ / 100);
}
}
#endif
 
static void ttm_bo_release(struct kref *kref)
{
571,10 → 626,10
 
drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
// ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
ttm_bo_cleanup_refs_or_queue(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
// ttm_bo_cleanup_refs_or_queue(bo);
// kref_put(&bo->list_kref, ttm_bo_release_list);
}
 
void ttm_bo_unref(struct ttm_buffer_object **p_bo)
586,6 → 641,121
}
EXPORT_SYMBOL(ttm_bo_unref);
 
#if 0
int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
 
void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
if (resched)
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_reg evict_mem;
struct ttm_placement placement;
int ret = 0;
 
spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bdev->fence_lock);
 
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to expire sync object before buffer eviction\n");
}
goto out;
}
 
// BUG_ON(!ttm_bo_is_reserved(bo));
 
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
evict_mem.bus.io_reserved_vm = false;
evict_mem.bus.io_reserved_count = 0;
 
placement.fpfn = 0;
placement.lpfn = 0;
placement.num_placement = 0;
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
pr_err("Failed to find memory space for buffer 0x%p eviction\n",
bo);
ttm_bo_mem_space_debug(bo, &placement);
}
goto out;
}
 
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
pr_err("Buffer eviction failed\n");
ttm_bo_mem_put(bo, &evict_mem);
goto out;
}
bo->evicted = true;
out:
return ret;
}
 
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_buffer_object *bo;
int ret = -EBUSY, put_count;
 
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
if (!ret)
break;
}
 
if (ret) {
spin_unlock(&glob->lru_lock);
return ret;
}
 
kref_get(&bo->list_kref);
 
if (!list_empty(&bo->ddestroy)) {
ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
no_wait_gpu);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
 
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
 
BUG_ON(ret != 0);
 
ttm_bo_list_ref_sub(bo, put_count, true);
 
ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
ttm_bo_unreserve(bo);
 
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
 
void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
611,15 → 781,15
int ret;
 
do {
ret = (*man->func->get_node)(man, bo, placement, 0, mem);
ret = (*man->func->get_node)(man, bo, placement, mem);
if (unlikely(ret != 0))
return ret;
if (mem->mm_node)
break;
// ret = ttm_mem_evict_first(bdev, mem_type,
// interruptible, no_wait_gpu);
// if (unlikely(ret != 0))
// return ret;
ret = ttm_mem_evict_first(bdev, mem_type,
interruptible, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
if (mem->mm_node == NULL)
return -ENOMEM;
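
/*
 * The shape of the loop above, reduced to a stand-alone sketch (the
 * function-pointer names are illustrative): keep asking the manager for
 * space, and whenever it has none, evict the least-recently-used buffer
 * and retry until the allocation lands or eviction itself fails.
 */
#include <stddef.h>

typedef int (*get_node_fn)(void *man, void **node);
typedef int (*evict_fn)(void *man);

static int force_space(void *man, get_node_fn get_node,
                       evict_fn evict_first, void **node)
{
    int ret;

    for (;;) {
        ret = get_node(man, node);     /* ask the manager for space */
        if (ret != 0)
            return ret;                /* hard failure              */
        if (*node != NULL)
            return 0;                  /* allocation landed         */
        ret = evict_first(man);        /* make room, then retry     */
        if (ret != 0)
            return ret;                /* nothing left to evict     */
    }
}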
724,8 → 894,7
 
if (man->has_type && man->use_type) {
type_found = true;
ret = (*man->func->get_node)(man, bo, placement,
cur_flags, mem);
ret = (*man->func->get_node)(man, bo, placement, mem);
if (unlikely(ret))
return ret;
}
765,6 → 934,7
ttm_flag_masked(&cur_flags, placement->busy_placement[i],
~TTM_PL_MASK_MEMTYPE);
 
 
if (mem_type == TTM_PL_SYSTEM) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
795,7 → 965,7
struct ttm_mem_reg mem;
struct ttm_bo_device *bdev = bo->bdev;
 
lockdep_assert_held(&bo->resv->lock.base);
// BUG_ON(!ttm_bo_is_reserved(bo));
 
/*
* FIXME: It's possible to pipeline buffer moves.
826,6 → 996,7
ttm_bo_mem_put(bo, &mem);
return ret;
}
#endif
 
static bool ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem,
863,7 → 1034,7
int ret;
uint32_t new_flags;
 
lockdep_assert_held(&bo->resv->lock.base);
// BUG_ON(!ttm_bo_is_reserved(bo));
/* Check that range is valid */
if (placement->lpfn || placement->fpfn)
if (placement->fpfn > placement->lpfn ||
873,8 → 1044,8
* Check whether we need to move buffer.
*/
if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
ret = ttm_bo_move_buffer(bo, placement, interruptible,
no_wait_gpu);
// ret = ttm_bo_move_buffer(bo, placement, interruptible,
// no_wait_gpu);
if (ret)
return ret;
} else {
920,8 → 1091,19
{
int ret = 0;
unsigned long num_pages;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
 
// ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (ret) {
pr_err("Out of kernel memory\n");
if (destroy)
(*destroy)(bo);
else
kfree(bo);
return -ENOMEM;
}
 
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
pr_err("Illegal buffer object size\n");
929,6 → 1111,7
(*destroy)(bo);
else
kfree(bo);
// ttm_mem_global_free(mem_glob, acc_size);
return -EINVAL;
}
bo->destroy = destroy;
958,7 → 1141,7
bo->acc_size = acc_size;
bo->sg = sg;
bo->resv = &bo->ttm_resv;
reservation_object_init(bo->resv);
// reservation_object_init(bo->resv);
atomic_inc(&bo->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
 
968,23 → 1151,19
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
if (likely(!ret) &&
(bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg))
ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
bo->mem.num_pages);
// if (likely(!ret) &&
// (bo->type == ttm_bo_type_device ||
// bo->type == ttm_bo_type_sg))
// ret = ttm_bo_setup_vm(bo);
 
locked = ww_mutex_trylock(&bo->resv->lock);
WARN_ON(!locked);
// if (likely(!ret))
// ret = ttm_bo_validate(bo, placement, interruptible, false);
 
if (likely(!ret))
ret = ttm_bo_validate(bo, placement, interruptible, false);
// ttm_bo_unreserve(bo);
 
ttm_bo_unreserve(bo);
// if (unlikely(ret))
// ttm_bo_unref(&bo);
 
if (unlikely(ret))
ttm_bo_unref(&bo);
 
return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
1018,6 → 1197,53
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
 
int ttm_bo_create(struct ttm_bo_device *bdev,
unsigned long size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
bool interruptible,
struct file *persistent_swap_storage,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
size_t acc_size;
int ret;
 
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (unlikely(bo == NULL))
return -ENOMEM;
 
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
interruptible, persistent_swap_storage, acc_size,
NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
 
return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
unsigned long p_size)
{
1024,6 → 1250,8
int ret = -EINVAL;
struct ttm_mem_type_manager *man;
 
ENTER();
 
BUG_ON(type >= TTM_NUM_MEM_TYPES);
man = &bdev->man[type];
BUG_ON(man->has_type);
1049,9 → 1277,12
 
INIT_LIST_HEAD(&man->lru);
 
LEAVE();
 
return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
 
 
void ttm_bo_global_release(struct drm_global_reference *ref)
{
struct ttm_bo_global *glob = ref->object;
1066,10 → 1297,12
struct ttm_bo_global *glob = ref->object;
int ret;
 
ENTER();
 
mutex_init(&glob->device_list_mutex);
spin_lock_init(&glob->lru_lock);
glob->mem_glob = bo_ref->mem_glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
glob->dummy_read_page = AllocPage();
 
if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM;
1081,6 → 1314,8
 
atomic_set(&glob->bo_count, 0);
 
LEAVE();
 
return 0;
 
out_no_drp:
1089,15 → 1324,17
}
EXPORT_SYMBOL(ttm_bo_global_init);
 
 
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset,
bool need_dma32)
{
int ret = -EINVAL;
 
ENTER();
 
bdev->driver = driver;
 
memset(bdev->man, 0, sizeof(bdev->man));
1112,9 → 1349,9
 
drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
0x10000000);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
// INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
bdev->dev_mapping = NULL;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
bdev->val_seq = 0;
1123,6 → 1360,8
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
 
LEAVE();
 
return 0;
out_no_sys:
return ret;
1150,28 → 1389,6
return true;
}
 
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
 
drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
ttm_mem_io_free_vm(bo);
}
 
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
ttm_mem_io_lock(man, false);
ttm_bo_unmap_virtual_locked(bo);
ttm_mem_io_unlock(man);
}
 
 
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
{
1183,47 → 1400,25
if (likely(bo->sync_obj == NULL))
return 0;
 
while (bo->sync_obj) {
 
if (driver->sync_obj_signaled(bo->sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&tmp_obj);
spin_lock(&bdev->fence_lock);
continue;
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
 
if (no_wait)
return -EBUSY;
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
int ret = 0;
 
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
ret = driver->sync_obj_wait(sync_obj,
lazy, interruptible);
if (unlikely(ret != 0)) {
driver->sync_obj_unref(&sync_obj);
spin_lock(&bdev->fence_lock);
/*
* Using ttm_bo_reserve makes sure the lru lists are updated.
*/
 
return ret;
}
spin_lock(&bdev->fence_lock);
if (likely(bo->sync_obj == sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING,
&bo->priv_flags);
spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
driver->sync_obj_unref(&tmp_obj);
spin_lock(&bdev->fence_lock);
} else {
spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
spin_lock(&bdev->fence_lock);
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
atomic_dec(&bo->cpu_writers);
}
}
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
 
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
/drivers/video/drm/ttm/ttm_bo_util.c
27,25 → 27,17
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#define iowrite32(v, addr) writel((v), (addr))
#define ioread32(addr) readl(addr)
 
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
//#include <linux/io.h>
//#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
//#include <linux/vmalloc.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
 
#define __pgprot(x) ((pgprot_t) { (x) } )
#define PAGE_KERNEL __pgprot(3)
 
void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
 
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
ttm_bo_mem_put(bo, &bo->mem);
164,7 → 156,6
}
EXPORT_SYMBOL(ttm_mem_io_free);
 
#if 0
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
struct ttm_mem_reg *mem = &bo->mem;
184,7 → 175,6
}
return 0;
}
#endif
 
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
217,7 → 207,7
if (mem->placement & TTM_PL_FLAG_WC)
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
else
addr = ioremap(mem->bus.base + mem->bus.offset, mem->bus.size);
addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
if (!addr) {
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bdev, mem);
268,14 → 258,27
 
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
 
dst = (void*)MapIoMem((addr_t)d, 4096, PG_SW);
 
#ifdef CONFIG_X86
dst = kmap_atomic_prot(d, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
dst = vmap(&d, 1, 0, prot);
else
dst = kmap(d);
#endif
if (!dst)
return -ENOMEM;
 
memcpy(dst, src, PAGE_SIZE);
memcpy_fromio(dst, src, PAGE_SIZE);
 
FreeKernelSpace(dst);
#ifdef CONFIG_X86
kunmap_atomic(dst);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(dst);
else
kunmap(d);
#endif
 
return 0;
}
291,15 → 294,27
return -ENOMEM;
 
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
 
src = (void*)MapIoMem((addr_t)s, 4096, PG_SW);
 
#ifdef CONFIG_X86
src = kmap_atomic_prot(s, prot);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
src = vmap(&s, 1, 0, prot);
else
src = kmap(s);
#endif
if (!src)
return -ENOMEM;
 
memcpy(dst, src, PAGE_SIZE);
memcpy_toio(dst, src, PAGE_SIZE);
 
FreeKernelSpace(src);
#ifdef CONFIG_X86
kunmap_atomic(src);
#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(src);
else
kunmap(s);
#endif
 
return 0;
}
337,12 → 352,8
/*
* Don't move nonexistent data. Clear destination instead.
*/
if (old_iomap == NULL &&
(ttm == NULL || (ttm->state == tt_unpopulated &&
!(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
memset(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
if (old_iomap == NULL && ttm == NULL)
goto out2;
}
 
/*
* TTM might be null for moves within the same region.
472,6 → 483,29
 
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
 
#elif defined(__powerpc__)
if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
pgprot_val(tmp) |= _PAGE_NO_CACHE;
if (caching_flags & TTM_PL_FLAG_UNCACHED)
pgprot_val(tmp) |= _PAGE_GUARDED;
}
#endif
#if defined(__ia64__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
if (!(caching_flags & TTM_PL_FLAG_CACHED))
tmp = pgprot_noncached(tmp);
#endif
return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
492,7 → 526,7
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
else
map->virtual = ioremap(bo->mem.bus.base + bo->mem.bus.offset + offset,
map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
size);
}
return (!map->virtual) ? -ENOMEM : 0;
523,7 → 557,7
 
map->bo_kmap_type = ttm_bo_map_kmap;
map->page = ttm->pages[start_page];
map->virtual = (void*)MapIoMem(page_to_phys(map->page), 4096, PG_SW);
map->virtual = kmap(map->page);
} else {
/*
* We need to use vmap to get the desired page protection
587,8 → 621,10
iounmap(map->virtual);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
break;
case ttm_bo_map_kmap:
FreeKernelSpace(map->virtual);
kunmap(map->page);
break;
case ttm_bo_map_premapped:
break;
677,25 → 713,3
return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
 
void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot)
{
void *vaddr;
char *tmp;
int i;
 
vaddr = AllocKernelSpace(count << 12);
if(vaddr == NULL)
return NULL;
 
for(i = 0, tmp = vaddr; i < count; i++)
{
MapPage(tmp, page_to_phys(pages[i]), PG_SW);
tmp+= 4096;
};
 
return vaddr;
};
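
/*
 * A hedged usage sketch for the KolibriOS vmap() above ('pages' is
 * assumed to hold valid struct page pointers, e.g. from the TTM page
 * allocator, and FreeKernelSpace() is assumed to release an
 * AllocKernelSpace() window, as the ttm_bo_map_kmap path above already
 * relies on): map the pages into one contiguous kernel window, touch it,
 * then give the window back.
 */
static int vmap_usage_sketch(struct page **pages, unsigned int count)
{
    void *virt = vmap(pages, count, 0, PAGE_KERNEL);

    if (virt == NULL)
        return -ENOMEM;

    memset(virt, 0, count << 12);   /* window behaves like normal memory */

    FreeKernelSpace(virt);          /* unmap and release the window      */
    return 0;
}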
 
/drivers/video/drm/ttm/ttm_page_alloc.c
41,7 → 41,7
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
//#include <linux/dma-mapping.h>
#include <linux/dma-mapping.h>
 
//#include <linux/atomic.h>
 
58,6 → 58,12
/* times are in msecs */
#define PAGE_FREE_INTERVAL 1000
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
 
 
#if 0
/**
* struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
*
109,6 → 115,7
**/
struct ttm_pool_manager {
struct kobject kobj;
struct shrinker mm_shrink;
struct ttm_pool_opts options;
 
union {
122,10 → 129,134
};
};
 
static struct attribute ttm_page_pool_max = {
.name = "pool_max_size",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
.name = "pool_small_allocation",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
.name = "pool_allocation_size",
.mode = S_IRUGO | S_IWUSR
};
 
static struct attribute *ttm_pool_attrs[] = {
&ttm_page_pool_max,
&ttm_page_pool_small,
&ttm_page_pool_alloc_size,
NULL
};
 
static void ttm_pool_kobj_release(struct kobject *kobj)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
kfree(m);
}
 
static ssize_t ttm_pool_store(struct kobject *kobj,
struct attribute *attr, const char *buffer, size_t size)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
int chars;
unsigned val;
chars = sscanf(buffer, "%u", &val);
if (chars == 0)
return size;
 
/* Convert kb to number of pages */
val = val / (PAGE_SIZE >> 10);
 
if (attr == &ttm_page_pool_max)
m->options.max_size = val;
else if (attr == &ttm_page_pool_small)
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
pr_warn("Setting allocation size to larger than %lu is not recommended\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
 
return size;
}
 
static ssize_t ttm_pool_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct ttm_pool_manager *m =
container_of(kobj, struct ttm_pool_manager, kobj);
unsigned val = 0;
 
if (attr == &ttm_page_pool_max)
val = m->options.max_size;
else if (attr == &ttm_page_pool_small)
val = m->options.small;
else if (attr == &ttm_page_pool_alloc_size)
val = m->options.alloc_size;
 
val = val * (PAGE_SIZE >> 10);
 
return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
 
static const struct sysfs_ops ttm_pool_sysfs_ops = {
.show = &ttm_pool_show,
.store = &ttm_pool_store,
};
 
static struct kobj_type ttm_pool_kobj_type = {
.release = &ttm_pool_kobj_release,
.sysfs_ops = &ttm_pool_sysfs_ops,
.default_attrs = ttm_pool_attrs,
};
 
static struct ttm_pool_manager *_manager;
 
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
 
for (i = 0; i < addrinarray; i++)
unmap_page_from_agp(pages[i]);
#endif
return 0;
}
 
static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
 
for (i = 0; i < addrinarray; i++)
map_page_into_agp(pages[i]);
#endif
return 0;
}
 
static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
int i;
 
for (i = 0; i < addrinarray; i++)
map_page_into_agp(pages[i]);
#endif
return 0;
}
#endif
 
/**
* Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
151,6 → 282,8
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
unsigned i;
if (set_pages_array_wb(pages, npages))
pr_err("Failed to set %d pages to wb!\n", npages);
for (i = 0; i < npages; ++i)
__free_page(pages[i]);
}
162,21 → 295,407
pool->nfrees += freed_pages;
}
 
/**
* Free pages from pool.
*
* To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
* pages in one go.
*
* @pool: pool to free the pages from
* @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
**/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
unsigned long irq_flags;
struct page *p;
struct page **pages_to_free;
unsigned freed_pages = 0,
npages_to_free = nr_free;
 
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
 
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
}
 
restart:
spin_lock_irqsave(&pool->lock, irq_flags);
 
list_for_each_entry_reverse(p, &pool->list, lru) {
if (freed_pages >= npages_to_free)
break;
 
pages_to_free[freed_pages++] = p;
/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
if (freed_pages >= NUM_PAGES_TO_ALLOC) {
/* remove range of pages from the pool */
__list_del(p->lru.prev, &pool->list);
 
ttm_pool_update_free_locked(pool, freed_pages);
/**
* Because changing page caching is costly
* we unlock the pool to prevent stalling.
*/
spin_unlock_irqrestore(&pool->lock, irq_flags);
 
ttm_pages_put(pages_to_free, freed_pages);
if (likely(nr_free != FREE_ALL_PAGES))
nr_free -= freed_pages;
 
if (NUM_PAGES_TO_ALLOC >= nr_free)
npages_to_free = nr_free;
else
npages_to_free = NUM_PAGES_TO_ALLOC;
 
freed_pages = 0;
 
/* free all so restart the processing */
if (nr_free)
goto restart;
 
/* Not allowed to fall through or break: the code that
* follows the loop runs inside the spinlock, while at
* this point we have already dropped it.
*/
goto out;
 
}
}
 
/* remove range of pages from the pool */
if (freed_pages) {
__list_del(&p->lru, &pool->list);
 
ttm_pool_update_free_locked(pool, freed_pages);
nr_free -= freed_pages;
}
 
spin_unlock_irqrestore(&pool->lock, irq_flags);
 
if (freed_pages)
ttm_pages_put(pages_to_free, freed_pages);
out:
kfree(pages_to_free);
return nr_free;
}
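
The function above repeatedly drops pool->lock around ttm_pages_put(), since
changing page caching attributes is far too slow to do under a spinlock.
Reduced to a skeleton (hypothetical helper names, not from the source):

    spin_lock_irqsave(&pool->lock, flags);
    while (more_work_locked(pool)) {           /* hypothetical predicate */
        collect_batch_locked(pool, batch);     /* hypothetical helper */
        spin_unlock_irqrestore(&pool->lock, flags);
        do_expensive_work(batch);              /* e.g. set_pages_array_wb() */
        spin_lock_irqsave(&pool->lock, flags); /* re-take before next batch */
    }
    spin_unlock_irqrestore(&pool->lock, flags);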
 
/**
* Callback for mm to request pool to reduce number of pages held.
*
* XXX: (dchinner) Deadlock warning!
*
* ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
* this can deadlock when called with an sc->gfp_mask that is not equal to
* GFP_KERNEL.
*
* This code is crying out for a shrinker per pool....
*/
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;
 
pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
freed += nr_free - shrink_pages;
}
return freed;
}
 
 
static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
unsigned i;
unsigned long count = 0;
 
for (i = 0; i < NUM_POOLS; ++i)
count += _manager->pools[i].npages;
 
return count;
}
 
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.count_objects = ttm_pool_shrink_count;
manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
 
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
unregister_shrinker(&manager->mm_shrink);
}
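
A minimal sketch of how the mm core would drive the two callbacks registered
above (hypothetical direct invocation; in the kernel this happens inside
shrink_slab(), never in driver code):

    struct shrink_control sc = { .nr_to_scan = 128 };
    unsigned long avail = ttm_pool_shrink_count(&_manager->mm_shrink, &sc);
    if (avail)                                           /* pages available to reclaim */
        ttm_pool_shrink_scan(&_manager->mm_shrink, &sc); /* frees up to nr_to_scan */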
 
static int ttm_set_pages_caching(struct page **pages,
enum ttm_caching_state cstate, unsigned cpages)
{
int r = 0;
/* Set page caching */
switch (cstate) {
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to uc!\n", cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to wc!\n", cpages);
break;
default:
break;
}
return r;
}
 
/**
* Free the pages that failed to change caching state. Any pages that have
* already changed their caching state stay on the list and are put back
* to the pool.
*/
static void ttm_handle_caching_state_failure(struct list_head *pages,
int ttm_flags, enum ttm_caching_state cstate,
struct page **failed_pages, unsigned cpages)
{
unsigned i;
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
list_del(&failed_pages[i]->lru);
__free_page(failed_pages[i]);
}
}
 
/**
* Allocate new pages with correct caching.
*
* This function is reentrant if the caller updates count according to the
* number of pages returned in the pages array.
*/
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
struct page **caching_array;
struct page *p;
int r = 0;
unsigned i, cpages;
unsigned max_cpages = min(count,
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
 
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
if (!caching_array) {
pr_err("Unable to allocate table for new pages\n");
return -ENOMEM;
}
 
for (i = 0, cpages = 0; i < count; ++i) {
p = alloc_page(gfp_flags);
 
if (!p) {
pr_err("Unable to get page %u\n", i);
 
/* store already allocated pages in the pool after
* setting the caching state */
if (cpages) {
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
caching_array, cpages);
}
r = -ENOMEM;
goto out;
}
 
#ifdef CONFIG_HIGHMEM
/* gfp flags of a highmem page should never be dma32, so
* we should be fine in that case
*/
if (!PageHighMem(p))
#endif
{
caching_array[cpages++] = p;
if (cpages == max_cpages) {
 
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r) {
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
caching_array, cpages);
goto out;
}
cpages = 0;
}
}
 
list_add(&p->lru, pages);
}
 
if (cpages) {
r = ttm_set_pages_caching(caching_array, cstate, cpages);
if (r)
ttm_handle_caching_state_failure(pages,
ttm_flags, cstate,
caching_array, cpages);
}
out:
kfree(caching_array);
 
return r;
}
 
/**
* Fill the given pool if there aren't enough pages and the requested number of
* pages is small.
*/
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
int ttm_flags, enum ttm_caching_state cstate, unsigned count,
unsigned long *irq_flags)
{
struct page *p;
int r;
unsigned cpages = 0;
/**
* Only allow one pool fill operation at a time.
* If the pool doesn't have enough pages for the allocation, new pages
* are allocated from outside of the pool.
*/
if (pool->fill_lock)
return;
 
pool->fill_lock = true;
 
/* If the allocation request is small and there are not enough
* pages in the pool, fill the pool up first. */
if (count < _manager->options.small
&& count > pool->npages) {
struct list_head new_pages;
unsigned alloc_size = _manager->options.alloc_size;
 
/**
* Can't change page caching if in irqsave context. We have to
* drop the pool->lock.
*/
spin_unlock_irqrestore(&pool->lock, *irq_flags);
 
INIT_LIST_HEAD(&new_pages);
r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
cstate, alloc_size);
spin_lock_irqsave(&pool->lock, *irq_flags);
 
if (!r) {
list_splice(&new_pages, &pool->list);
++pool->nrefills;
pool->npages += alloc_size;
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
pool->npages += cpages;
}
 
}
pool->fill_lock = false;
}
 
/**
* Cut 'count' number of pages from the pool and put them on the return list.
*
* @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
struct list_head *pages,
int ttm_flags,
enum ttm_caching_state cstate,
unsigned count)
{
unsigned long irq_flags;
struct list_head *p;
unsigned i;
 
spin_lock_irqsave(&pool->lock, irq_flags);
ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
 
if (count >= pool->npages) {
/* take all pages from the pool */
list_splice_init(&pool->list, pages);
count -= pool->npages;
pool->npages = 0;
goto out;
}
/* Find the last page to include for the requested number of pages.
* Walk from whichever end of the list is nearer so the search is at most
* half the pool: e.g. with 100 pages pooled, a request for 30 walks 30
* nodes forward, while a request for 80 walks only 20 nodes backward. */
if (count <= pool->npages/2) {
i = 0;
list_for_each(p, &pool->list) {
if (++i == count)
break;
}
} else {
i = pool->npages + 1;
list_for_each_prev(p, &pool->list) {
if (--i == count)
break;
}
}
/* Cut 'count' number of pages from the pool */
list_cut_position(pages, &pool->list, p);
pool->npages -= count;
count = 0;
out:
spin_unlock_irqrestore(&pool->lock, irq_flags);
return count;
}
#endif
 
/* Put all pages in the pages list back into the correct pool to await reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
// struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
unsigned i;
 
if (1) {
for (i = 0; i < npages; i++) {
if (pages[i]) {
// if (page_count(pages[i]) != 1)
// pr_err("Erroneous page count. Leaking pages.\n");
FreePage(pages[i]);
pages[i] = NULL;
}
}
return;
 
#if 0
if (pool == NULL) {
/* No pool for this memory type so free the pages */
for (i = 0; i < npages; i++) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
pr_err("Erroneous page count. Leaking pages.\n");
__free_page(pages[i]);
pages[i] = NULL;
}
184,8 → 703,32
return;
}
 
spin_lock_irqsave(&pool->lock, irq_flags);
for (i = 0; i < npages; i++) {
if (pages[i]) {
if (page_count(pages[i]) != 1)
pr_err("Erroneous page count. Leaking pages.\n");
list_add_tail(&pages[i]->lru, &pool->list);
pages[i] = NULL;
pool->npages++;
}
}
/* Check that we don't go over the pool limit */
npages = 0;
if (pool->npages > _manager->options.max_size) {
npages = pool->npages - _manager->options.max_size;
/* free at least NUM_PAGES_TO_ALLOC number of pages
* to reduce calls to set_memory_wb */
if (npages < NUM_PAGES_TO_ALLOC)
npages = NUM_PAGES_TO_ALLOC;
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
ttm_page_pool_free(pool, npages);
#endif
 
}
 
/*
* On success the pages list will hold 'count' correctly
* cached pages.
193,21 → 736,44
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
enum ttm_caching_state cstate)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
// struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct list_head plist;
struct page *p = NULL;
gfp_t gfp_flags = 0;
// gfp_t gfp_flags = GFP_USER;
unsigned count;
int r;
 
for (r = 0; r < npages; ++r) {
p = AllocPage();
if (!p) {
pr_err("Unable to allocate page\n");
/* unwind the pages taken so far instead of leaking them */
while (--r >= 0) {
FreePage(pages[r]);
pages[r] = NULL;
}
return -ENOMEM;
}

pages[r] = p;
}
return 0;
 
#if 0
 
 
/* set zero flag for page allocation if required */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
 
/* No pool for cached pages */
if (1) {
if (pool == NULL) {
if (flags & TTM_PAGE_FLAG_DMA32)
gfp_flags |= GFP_DMA32;
else
gfp_flags |= GFP_HIGHUSER;
 
for (r = 0; r < npages; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
 
pr_err("Unable to allocate page\n");
return -ENOMEM;
}
 
216,12 → 782,52
return 0;
}
 
/* combine zero flag to pool flags */
gfp_flags |= pool->gfp_flags;
 
/* First we take pages from the pool */
INIT_LIST_HEAD(&plist);
npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
count = 0;
list_for_each_entry(p, &plist, lru) {
pages[count++] = p;
}
 
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
list_for_each_entry(p, &plist, lru) {
if (PageHighMem(p))
clear_highpage(p);
else
clear_page(page_address(p));
}
}
 
/* If the pool didn't have enough pages allocate new ones. */
if (npages > 0) {
/* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel.
**/
INIT_LIST_HEAD(&plist);
r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
list_for_each_entry(p, &plist, lru) {
pages[count++] = p;
}
if (r) {
/* If there are any pages in the list, put them back to
* the pool. */
pr_err("Failed to allocate extra pages for large request\n");
ttm_put_pages(pages, count, flags, cstate);
return r;
}
}
#endif
 
return 0;
}
 
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
#if 0
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
spin_lock_init(&pool->lock);
238,12 → 844,34
 
WARN_ON(_manager);
 
pr_info("Initializing pool allocator\n");
 
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 
ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
 
ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
 
ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
GFP_USER | GFP_DMA32, "wc dma");
 
ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
GFP_USER | GFP_DMA32, "uc dma");
 
_manager->options.max_size = max_pages;
_manager->options.small = SMALL_ALLOCATION;
_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
 
ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
&glob->kobj, "pool");
if (unlikely(ret != 0)) {
kobject_put(&_manager->kobj);
_manager = NULL;
return ret;
}
 
ttm_pool_mm_shrink_init(_manager);
 
return 0;
}
 
251,9 → 879,18
{
int i;
 
pr_info("Finalizing pool allocator\n");
ttm_pool_mm_shrink_fini(_manager);
 
for (i = 0; i < NUM_POOLS; ++i)
ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
 
kobject_put(&_manager->kobj);
_manager = NULL;
}
 
#endif
 
int ttm_pool_populate(struct ttm_tt *ttm)
{
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
271,7 → 908,9
ttm_pool_unpopulate(ttm);
return -ENOMEM;
}
 
}
 
ttm->state = tt_unbound;
return 0;
}
283,6 → 922,8
 
for (i = 0; i < ttm->num_pages; ++i) {
if (ttm->pages[i]) {
ttm_mem_global_free_page(ttm->glob->mem_glob,
ttm->pages[i]);
ttm_put_pages(&ttm->pages[i], 1,
ttm->page_flags,
ttm->caching_state);
/drivers/video/drm/ttm/ttm_tt.c
57,12 → 57,9
 
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
sizeof(*ttm->ttm.pages) +
sizeof(*ttm->dma_address) +
sizeof(*ttm->cpu_address));
ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
sizeof(*ttm->dma_address));
}
 
#ifdef CONFIG_X86
121,8 → 118,8
return 0;
}
 
if (ttm->caching_state == tt_cached)
drm_clflush_pages(ttm->pages, ttm->num_pages);
// if (ttm->caching_state == tt_cached)
// drm_clflush_pages(ttm->pages, ttm->num_pages);
 
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
175,8 → 172,8
ttm_tt_unbind(ttm);
}
 
if (ttm->state == tt_unbound)
ttm_tt_unpopulate(ttm);
// if (ttm->state == tt_unbound)
// ttm_tt_unpopulate(ttm);
 
// if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
// ttm->swap_storage)
233,7 → 230,7
 
INIT_LIST_HEAD(&ttm_dma->pages_list);
ttm_dma_tt_alloc_page_directory(ttm_dma);
if (!ttm->pages) {
if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm);
printf("Failed allocating page table\n");
return -ENOMEM;
248,7 → 245,7
 
drm_free_large(ttm->pages);
ttm->pages = NULL;
ttm_dma->cpu_address = NULL;
drm_free_large(ttm_dma->dma_address);
ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
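
An editorial note on the two ttm_dma_tt_alloc_page_directory variants above:
the combined form carves three arrays out of a single drm_calloc_large()
block, so freeing ttm->pages releases all of them, while the split form must
free dma_address separately, which is exactly what the ttm_dma_tt_fini hunk
shows:

    /* Combined layout, a single allocation sized for all three arrays:
     *   [ pages[0..n-1] | cpu_address[0..n-1] | dma_address[0..n-1] ]
     * One drm_free_large(ttm->pages) releases all three at once. */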
264,6 → 261,8
}
}
 
#if 0
 
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
int ret = 0;
287,8 → 286,9
return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
#endif
 
#if 0
/*
int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
380,20 → 380,7
 
return ret;
}
#endif
 
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
pgoff_t i;
struct page **page = ttm->pages;
*/
 
}
 
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
if (ttm->state == tt_unpopulated)
return;
 
ttm_tt_clear_mapping(ttm);
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
/drivers/video/drm/ttm/ttm_bo_manager.c
50,13 → 50,11
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
uint32_t flags,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node = NULL;
enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
unsigned long lpfn;
int ret;
 
68,15 → 66,11
if (!node)
return -ENOMEM;
 
if (flags & TTM_PL_FLAG_TOPDOWN)
aflags = DRM_MM_CREATE_TOP;
 
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
mem->page_alignment, 0,
ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
mem->page_alignment,
placement->fpfn, lpfn,
DRM_MM_SEARCH_BEST,
aflags);
DRM_MM_SEARCH_BEST);
spin_unlock(&rman->lock);
 
if (unlikely(ret)) {
/drivers/video/drm/ttm/ttm_execbuf_util.c
24,6 → 24,7
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
struct ww_acquire_ctx{};
 
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
32,9 → 33,8
#include <linux/sched.h>
#include <linux/module.h>
 
DEFINE_WW_CLASS(reservation_ww_class);
 
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
static void ttm_eu_backoff_reservation_locked(struct list_head *list,
struct ww_acquire_ctx *ticket)
{
struct ttm_validate_buffer *entry;
 
48,7 → 48,7
ttm_bo_add_to_lru(bo);
entry->removed = false;
}
__ttm_bo_unreserve(bo);
// ww_mutex_unlock(&bo->resv->lock);
}
}
 
94,9 → 94,8
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list);
if (ticket)
ww_acquire_fini(ticket);
ttm_eu_backoff_reservation_locked(list, ticket);
// ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
132,8 → 131,7
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
 
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
// ww_acquire_init(ticket, &reservation_ww_class);
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
142,21 → 140,20
if (entry->reserved)
continue;
 
ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
ticket);
 
ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
 
if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
* this succeeds.
*/
BUG_ON(ticket == NULL);
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list);
ttm_eu_backoff_reservation_locked(list, ticket);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
ticket);
// ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
// ticket);
if (unlikely(ret != 0)) {
if (ret == -EINTR)
ret = -ERESTARTSYS;
179,8 → 176,7
}
}
 
if (ticket)
ww_acquire_done(ticket);
// ww_acquire_done(ticket);
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
189,14 → 185,12
 
err:
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list);
ttm_eu_backoff_reservation_locked(list, ticket);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
err_fini:
if (ticket) {
ww_acquire_done(ticket);
ww_acquire_fini(ticket);
}
// ww_acquire_done(ticket);
// ww_acquire_fini(ticket);
return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
226,13 → 220,12
entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
ttm_bo_add_to_lru(bo);
__ttm_bo_unreserve(bo);
// ww_mutex_unlock(&bo->resv->lock);
entry->reserved = false;
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
if (ticket)
ww_acquire_fini(ticket);
// ww_acquire_fini(ticket);
 
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
/drivers/video/drm/ttm/ttm_memory.c
37,40 → 37,534
#include <linux/module.h>
#include <linux/slab.h>
 
struct sysinfo {
u32_t totalram; /* Total usable main memory size */
u32_t freeram; /* Available memory size */
u32_t sharedram; /* Amount of shared memory */
u32_t bufferram; /* Memory used by buffers */
u32_t totalswap; /* Total swap space size */
u32_t freeswap; /* swap space still available */
u32_t totalhigh; /* Total high memory size */
u32_t freehigh; /* Available high memory size */
u32_t mem_unit; /* Memory unit size in bytes */
};
 
 
int ttm_mem_global_init(struct ttm_mem_global *glob)
#define TTM_MEMORY_ALLOC_RETRIES 4
 
struct ttm_mem_zone {
struct kobject kobj;
struct ttm_mem_global *glob;
const char *name;
uint64_t zone_mem;
uint64_t emer_mem;
uint64_t max_mem;
uint64_t swap_limit;
uint64_t used_mem;
};
 
#if 0
 
static struct attribute ttm_mem_sys = {
.name = "zone_memory",
.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
.name = "emergency_memory",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
.name = "available_memory",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
.name = "swap_limit",
.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
.name = "used_memory",
.mode = S_IRUGO
};
#endif
 
static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
 
pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
zone->name, (unsigned long long)zone->used_mem >> 10);
kfree(zone);
}
 
#if 0
static ssize_t ttm_mem_zone_show(struct kobject *kobj,
struct attribute *attr,
char *buffer)
{
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
uint64_t val = 0;
 
spin_lock(&zone->glob->lock);
if (attr == &ttm_mem_sys)
val = zone->zone_mem;
else if (attr == &ttm_mem_emer)
val = zone->emer_mem;
else if (attr == &ttm_mem_max)
val = zone->max_mem;
else if (attr == &ttm_mem_swap)
val = zone->swap_limit;
else if (attr == &ttm_mem_used)
val = zone->used_mem;
spin_unlock(&zone->glob->lock);
 
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(unsigned long long) val >> 10);
}
 
static void ttm_check_swapping(struct ttm_mem_global *glob);
 
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
struct attribute *attr,
const char *buffer,
size_t size)
{
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
int chars;
unsigned long val;
uint64_t val64;
 
chars = sscanf(buffer, "%lu", &val);
if (chars == 0)
return size;
 
val64 = val;
val64 <<= 10;
 
spin_lock(&zone->glob->lock);
if (val64 > zone->zone_mem)
val64 = zone->zone_mem;
if (attr == &ttm_mem_emer) {
zone->emer_mem = val64;
if (zone->max_mem > val64)
zone->max_mem = val64;
} else if (attr == &ttm_mem_max) {
zone->max_mem = val64;
if (zone->emer_mem < val64)
zone->emer_mem = val64;
} else if (attr == &ttm_mem_swap)
zone->swap_limit = val64;
spin_unlock(&zone->glob->lock);
 
ttm_check_swapping(zone->glob);
 
return size;
}
#endif
 
//static struct attribute *ttm_mem_zone_attrs[] = {
// &ttm_mem_sys,
// &ttm_mem_emer,
// &ttm_mem_max,
// &ttm_mem_swap,
// &ttm_mem_used,
// NULL
//};
 
//static const struct sysfs_ops ttm_mem_zone_ops = {
// .show = &ttm_mem_zone_show,
// .store = &ttm_mem_zone_store
//};
 
static struct kobj_type ttm_mem_zone_kobj_type = {
.release = &ttm_mem_zone_kobj_release,
// .sysfs_ops = &ttm_mem_zone_ops,
// .default_attrs = ttm_mem_zone_attrs,
};
 
static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
struct ttm_mem_global *glob =
container_of(kobj, struct ttm_mem_global, kobj);
 
kfree(glob);
}
 
static struct kobj_type ttm_mem_glob_kobj_type = {
.release = &ttm_mem_global_kobj_release,
};
 
#if 0
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
bool from_wq, uint64_t extra)
{
unsigned int i;
struct ttm_mem_zone *zone;
uint64_t target;
 
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
 
if (from_wq)
target = zone->swap_limit;
else if (capable(CAP_SYS_ADMIN))
target = zone->emer_mem;
else
target = zone->max_mem;
 
target = (extra > target) ? 0ULL : target;
 
if (zone->used_mem > target)
return true;
}
return false;
}
 
/**
* At this point we only support a single shrink callback.
* Extend this if needed, perhaps using a linked list of callbacks.
* Note that this function is reentrant:
* many threads may try to swap out at any given time.
*/
 
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
uint64_t extra)
{
int ret;
int i;
struct ttm_mem_shrink *shrink;
 
spin_lock_init(&glob->lock);
spin_lock(&glob->lock);
if (glob->shrink == NULL)
goto out;
 
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
shrink = glob->shrink;
spin_unlock(&glob->lock);
ret = shrink->do_shrink(shrink);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
goto out;
}
out:
spin_unlock(&glob->lock);
}
 
 
ttm_page_alloc_init(glob, 4*1024);
 
static void ttm_shrink_work(struct work_struct *work)
{
struct ttm_mem_global *glob =
container_of(work, struct ttm_mem_global, work);
 
ttm_shrink(glob, true, 0ULL);
}
#endif
 
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
uint64_t mem = 0; /* FIXME: si is not populated in this port, see below */
int ret;
 
if (unlikely(!zone))
return -ENOMEM;
 
// mem = si->totalram - si->totalhigh;
// mem *= si->mem_unit;
 
zone->name = "kernel";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_kernel = zone;
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
glob->zones[glob->num_zones++] = zone;
return 0;
out_no_zone:
ttm_mem_global_release(glob);
}
 
#if 0
#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
struct ttm_mem_zone *zone;
uint64_t mem;
int ret;
 
if (si->totalhigh == 0)
return 0;
 
zone = kzalloc(sizeof(*zone), GFP_KERNEL);
if (unlikely(!zone))
return -ENOMEM;
 
mem = si->totalram;
mem *= si->mem_unit;
 
zone->name = "highmem";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_highmem = zone;
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
glob->zones[glob->num_zones++] = zone;
return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
const struct sysinfo *si)
{
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
uint64_t mem;
int ret;
 
if (unlikely(!zone))
return -ENOMEM;
 
mem = si->totalram;
mem *= si->mem_unit;
 
/**
* No special dma32 zone needed.
*/
 
if (mem <= ((uint64_t) 1ULL << 32)) {
kfree(zone);
return 0;
}
 
/*
* Limit max dma32 memory to 4GB for now
* until we can figure out how big this
* zone really is.
*/
 
mem = ((uint64_t) 1ULL << 32);
zone->name = "dma32";
zone->zone_mem = mem;
zone->max_mem = mem >> 1;
zone->emer_mem = (mem >> 1) + (mem >> 2);
zone->swap_limit = zone->max_mem - (mem >> 3);
zone->used_mem = 0;
zone->glob = glob;
glob->zone_dma32 = zone;
ret = kobject_init_and_add(
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
}
glob->zones[glob->num_zones++] = zone;
return 0;
}
#endif
 
 
 
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
unsigned int i;
struct ttm_mem_zone *zone;
 
/* let the page allocator first stop the shrink work. */
// ttm_page_alloc_fini();
// ttm_dma_page_alloc_fini();
ttm_page_alloc_fini();
ttm_dma_page_alloc_fini();
 
 
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
}
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);
 
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
bool needs_swapping = false;
unsigned int i;
struct ttm_mem_zone *zone;
 
spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (zone->used_mem > zone->swap_limit) {
needs_swapping = true;
break;
}
}
 
spin_unlock(&glob->lock);
 
if (unlikely(needs_swapping))
(void)queue_work(glob->swap_queue, &glob->work);
 
}
 
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t amount)
{
unsigned int i;
struct ttm_mem_zone *zone;
 
spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
zone->used_mem -= amount;
}
spin_unlock(&glob->lock);
}
 
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);
 
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t amount, bool reserve)
{
uint64_t limit;
int ret = -ENOMEM;
unsigned int i;
struct ttm_mem_zone *zone;
 
spin_lock(&glob->lock);
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
 
limit = zone->emer_mem;
 
if (zone->used_mem > limit)
goto out_unlock;
}
 
if (reserve) {
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
if (single_zone && zone != single_zone)
continue;
zone->used_mem += amount;
}
}
 
ret = 0;
out_unlock:
spin_unlock(&glob->lock);
ttm_check_swapping(glob);
 
return ret;
}
 
 
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t memory,
bool no_wait, bool interruptible)
{
int count = TTM_MEMORY_ALLOC_RETRIES;
 
while (unlikely(ttm_mem_global_reserve(glob,
single_zone,
memory, true)
!= 0)) {
if (no_wait)
return -ENOMEM;
if (unlikely(count-- == 0))
return -ENOMEM;
ttm_shrink(glob, false, memory + (memory >> 2) + 16);
}
 
return 0;
}
 
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
bool no_wait, bool interruptible)
{
/**
* Normal allocations of kernel memory are registered in
* all zones.
*/
 
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
 
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
struct page *page,
bool no_wait, bool interruptible)
{
 
struct ttm_mem_zone *zone = NULL;
 
/**
* Page allocations may be registered in a single zone
* only if highmem or !dma32.
*/
 
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page) && glob->zone_highmem != NULL)
zone = glob->zone_highmem;
#else
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
interruptible);
}
 
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
struct ttm_mem_zone *zone = NULL;
 
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page) && glob->zone_highmem != NULL)
zone = glob->zone_highmem;
#else
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}
 
#endif
 
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
 
}
 
size_t ttm_round_pot(size_t size)
{
if ((size & (size - 1)) == 0)
88,3 → 582,24
return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
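
Only the opening power-of-two test of ttm_round_pot() survives the diff above;
upstream rounds small sizes up to the next power of two and page-aligns larger
ones. A worked illustration under that assumption:

    size_t a = ttm_round_pot(4096); /* already a power of two -> returned as-is */
    size_t b = ttm_round_pot(24);   /* -> 32, next power of two (assumed upstream behaviour) */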
 
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
}
 
 
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
bool no_wait, bool interruptible)
{
return 0;
}
 
EXPORT_SYMBOL(ttm_mem_global_alloc);
 
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
return 0;
}
EXPORT_SYMBOL(ttm_mem_global_init);
/drivers/video/drm/radeon/rv770_dma.c
File deleted
/drivers/video/drm/radeon/ppsmc.h
File deleted
/drivers/video/drm/radeon/rs780_dpm.c
File deleted
/drivers/video/drm/radeon/rv740_dpm.c
File deleted
/drivers/video/drm/radeon/sumo_dpm.c
File deleted
/drivers/video/drm/radeon/rs780_dpm.h
File deleted
/drivers/video/drm/radeon/ci_smc.c
File deleted
/drivers/video/drm/radeon/cypress_dpm.c
File deleted
/drivers/video/drm/radeon/sumo_dpm.h
File deleted
/drivers/video/drm/radeon/ci_dpm.h
File deleted
/drivers/video/drm/radeon/cypress_dpm.h
File deleted
/drivers/video/drm/radeon/r100_track.h
File deleted
/drivers/video/drm/radeon/uvd_v3_1.c
File deleted
/drivers/video/drm/radeon/kv_smc.c
File deleted
/drivers/video/drm/radeon/ni_dma.c
File deleted
/drivers/video/drm/radeon/kv_dpm.h
File deleted
/drivers/video/drm/radeon/ni_dpm.c
File deleted
/drivers/video/drm/radeon/si_smc.c
File deleted
/drivers/video/drm/radeon/si_dpm.h
File deleted
/drivers/video/drm/radeon/clearstate_defs.h
File deleted
/drivers/video/drm/radeon/rv770_smc.c
File deleted
/drivers/video/drm/radeon/trinity_dpm.c
File deleted
/drivers/video/drm/radeon/cik_blit_shaders.c
File deleted
/drivers/video/drm/radeon/rv770_smc.h
File deleted
/drivers/video/drm/radeon/trinity_dpm.h
File deleted
/drivers/video/drm/radeon/btc_dpm.c
File deleted
/drivers/video/drm/radeon/smu7_discrete.h
File deleted
/drivers/video/drm/radeon/r600_dpm.c
File deleted
/drivers/video/drm/radeon/main.c
File deleted
/drivers/video/drm/radeon/cik_blit_shaders.h
File deleted
/drivers/video/drm/radeon/btc_dpm.h
File deleted
/drivers/video/drm/radeon/radeon_ib.c
File deleted
/drivers/video/drm/radeon/r600_dpm.h
File deleted
/drivers/video/drm/radeon/evergreen_dma.c
File deleted
/drivers/video/drm/radeon/sumod.h
File deleted
/drivers/video/drm/radeon/radeon_uvd.c
File deleted
/drivers/video/drm/radeon/rs780d.h
File deleted
/drivers/video/drm/radeon/rv740d.h
File deleted
/drivers/video/drm/radeon/rv6xx_dpm.c
File deleted
/drivers/video/drm/radeon/rv6xx_dpm.h
File deleted
/drivers/video/drm/radeon/radeon_object.c
File deleted
/drivers/video/drm/radeon/vce_v1_0.c
File deleted
/drivers/video/drm/radeon/vce_v2_0.c
File deleted
/drivers/video/drm/radeon/evergreen_cs.c
File deleted
/drivers/video/drm/radeon/r600_reg_safe.h
File deleted
/drivers/video/drm/radeon/nislands_smc.h
File deleted
/drivers/video/drm/radeon/radeon_ucode.c
File deleted
/drivers/video/drm/radeon/dce6_afmt.c
File deleted
/drivers/video/drm/radeon/radeon_ucode.h
File deleted
/drivers/video/drm/radeon/evergreen_smc.h
File deleted
/drivers/video/drm/radeon/trinity_smc.c
File deleted
/drivers/video/drm/radeon/radeon_vm.c
File deleted
/drivers/video/drm/radeon/radeon_cs.c
File deleted
/drivers/video/drm/radeon/rv730_dpm.c
File deleted
/drivers/video/drm/radeon/rv770_dpm.c
File deleted
/drivers/video/drm/radeon/utils.c
File deleted
/drivers/video/drm/radeon/smu7_fusion.h
File deleted
/drivers/video/drm/radeon/sumo_smc.c
File deleted
/drivers/video/drm/radeon/rv770_dpm.h
File deleted
/drivers/video/drm/radeon/ci_dpm.c
File deleted
/drivers/video/drm/radeon/rv6xxd.h
File deleted
/drivers/video/drm/radeon/uvd_v1_0.c
File deleted
/drivers/video/drm/radeon/uvd_v2_2.c
File deleted
/drivers/video/drm/radeon/uvd_v4_2.c
File deleted
/drivers/video/drm/radeon/kv_dpm.c
File deleted
/drivers/video/drm/radeon/ni_dpm.h
File deleted
/drivers/video/drm/radeon/si_dma.c
File deleted
/drivers/video/drm/radeon/dce3_1_afmt.c
File deleted
/drivers/video/drm/radeon/btcd.h
File deleted
/drivers/video/drm/radeon/si_dpm.c
File deleted
/drivers/video/drm/radeon/sislands_smc.h
File deleted
/drivers/video/drm/radeon/r600_dma.c
File deleted
/drivers/video/drm/radeon/radeon_cursor.c
File deleted
/drivers/video/drm/radeon/radeon_vce.c
File deleted
/drivers/video/drm/radeon/smu7.h
File deleted
/drivers/video/drm/radeon/r600_cs.c
File deleted
/drivers/video/drm/radeon/cik_reg.h
File deleted
/drivers/video/drm/radeon/clearstate_evergreen.h
File deleted
/drivers/video/drm/radeon/cik_sdma.c
File deleted
/drivers/video/drm/radeon/rv730d.h
File deleted
/drivers/video/drm/radeon/clearstate_cayman.h
File deleted
/drivers/video/drm/radeon/clearstate_ci.h
File deleted
/drivers/video/drm/radeon/clearstate_si.h
File deleted
/drivers/video/drm/radeon/trinityd.h
File deleted
/drivers/video/drm/radeon/radeon_test.c
File deleted
/drivers/video/drm/radeon/pptable.h
File deleted
/drivers/video/drm/radeon/cikd.h
File deleted
/drivers/video/drm/radeon/evergreen_reg_safe.h
File deleted
/drivers/video/drm/radeon/cik.c
File deleted
/drivers/video/drm/radeon/cayman_reg_safe.h
File deleted
/drivers/video/drm/radeon/Makefile
34,6 → 34,7
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
$(DRV_INCLUDES)/drm/drm_mode.h \
$(DRV_INCLUDES)/drm/drm_mm.h \
atom.h \
radeon.h \
40,20 → 41,11
radeon_asic.h
 
NAME_SRC= \
main.c \
pci.c \
../ttm/ttm_bo.c \
../ttm/ttm_bo_manager.c \
../ttm/ttm_bo_util.c \
../ttm/ttm_execbuf_util.c \
../ttm/ttm_memory.c \
../ttm/ttm_page_alloc.c \
../ttm/ttm_tt.c \
$(DRM_TOPDIR)/drm_cache.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_drv.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
61,42 → 53,26
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_modeset_lock.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_plane_helper.c \
$(DRM_TOPDIR)/drm_probe_helper.c \
$(DRM_TOPDIR)/drm_rect.c \
$(DRM_TOPDIR)/drm_stub.c \
$(DRM_TOPDIR)/drm_vma_manager.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
bitmap.c \
hmm.c \
r700_vs.c \
radeon_device.c \
evergreen.c \
evergreen_blit_shaders.c \
evergreen_blit_kms.c \
evergreen_hdmi.c \
cayman_blit_shaders.c \
radeon_clocks.c \
atom.c \
ni.c \
atombios_crtc.c \
atombios_dp.c \
atombios_encoders.c \
atombios_i2c.c \
btc_dpm.c \
cayman_blit_shaders.c \
ci_dpm.c \
ci_smc.c \
cik.c \
cik_blit_shaders.c \
cik_sdma.c \
cypress_dpm.c \
dce3_1_afmt.c \
dce6_afmt.c \
evergreen.c \
evergreen_blit_shaders.c \
evergreen_cs.c \
evergreen_dma.c \
evergreen_hdmi.c \
kv_dpm.c \
kv_smc.c \
ni.c \
ni_dma.c \
ni_dpm.c \
radeon_agp.c \
radeon_asic.c \
radeon_atombios.c \
104,8 → 80,6
radeon_bios.c \
radeon_combios.c \
radeon_connectors.c \
radeon_cs.c \
radeon_clocks.c \
radeon_display.c \
radeon_encoders.c \
radeon_fence.c \
113,65 → 87,35
radeon_gart.c \
radeon_gem.c \
radeon_i2c.c \
radeon_ib.c \
radeon_irq_kms.c \
radeon_legacy_crtc.c \
radeon_legacy_encoders.c \
radeon_legacy_tv.c \
radeon_object.c \
radeon_object_kos.c \
radeon_pm.c \
radeon_ring.c \
radeon_sa.c \
radeon_semaphore.c \
radeon_test.c \
radeon_ttm.c \
radeon_ucode.c \
radeon_uvd.c \
radeon_vce.c \
radeon_vm.c \
rdisplay_kms.c \
r100.c \
r200.c \
r300.c \
r420.c \
rv515.c \
rv730_dpm.c \
rv740_dpm.c \
r520.c \
r600.c \
r600_audio.c \
r600_blit_kms.c \
r600_blit_shaders.c \
r600_cs.c \
r600_dma.c \
r600_dpm.c \
r600_hdmi.c \
rs400.c \
rs600.c \
rs690.c \
rv6xx_dpm.c \
rs780_dpm.c \
rv770.c \
rv770_dma.c \
rv770_dpm.c \
rv770_smc.c \
rdisplay.c \
rdisplay_kms.c \
cmdline.c \
si.c \
si_blit_shaders.c \
si_dma.c \
si_dpm.c \
si_smc.c \
sumo_dpm.c \
sumo_smc.c \
trinity_dpm.c \
trinity_smc.c \
utils.c \
uvd_v1_0.c \
uvd_v2_2.c \
uvd_v3_1.c \
uvd_v4_2.c \
vce_v1_0.c \
vce_v2_0.c \
fwblob.asm
 
FW_BINS= \
/drivers/video/drm/radeon/Makefile.lto
7,28 → 7,24
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
DDK_TOPDIR = d:\kos\kolibri\drivers\ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRV_TOPDIR = $(CURDIR)/../../..
DRM_TOPDIR = $(CURDIR)/..
 
INCLUDES = -I$(DRV_INCLUDES)/linux/uapi -I$(DRV_INCLUDES)/linux \
-I$(DRV_INCLUDES)/linux/asm -I$(DRV_INCLUDES)/drm \
-I./ -I$(DRV_INCLUDES)
DRV_INCLUDES = $(DRV_TOPDIR)/include
 
CFLAGS_OPT = -Os -march=i686 -fno-ident -fomit-frame-pointer -fno-builtin-printf -mno-ms-bitfields
CFLAGS_OPT+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -flto
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux
 
CFLAGS_OPT = -Os -march=i686 -fomit-frame-pointer -fno-builtin-printf -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -flto
CFLAGS = -c $(INCLUDES) $(DEFINES) $(CFLAGS_OPT)
 
LIBPATH:= $(DDK_TOPDIR)
LIBPATH:= $(DRV_TOPDIR)/ddk
 
LIBS:= -lddk -lcore -lgcc
LIBS:= -lddk -lcore
 
PE_FLAGS = --major-os-version,0,--minor-os-version,7,--major-subsystem-version,0,--minor-subsystem-version,5,--subsystem,native
LDFLAGS = -nostdlib,-shared,-s,-Map,atikms.map,--image-base,0,--file-alignment,512,--section-alignment,4096
 
 
LDFLAGS = -nostdlib,-shared,-s,$(PE_FLAGS),--image-base,0,--file-alignment,512,--section-alignment,4096
 
 
NAME:= atikms
 
HFILES:= $(DRV_INCLUDES)/linux/types.h \
38,6 → 34,7
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
$(DRV_INCLUDES)/drm/drm_mode.h \
$(DRV_INCLUDES)/drm/drm_mm.h \
atom.h \
radeon.h \
44,63 → 41,29
radeon_asic.h
 
NAME_SRC= \
main.c \
pci.c \
../ttm/ttm_bo.c \
../ttm/ttm_bo_manager.c \
../ttm/ttm_bo_util.c \
../ttm/ttm_execbuf_util.c \
../ttm/ttm_memory.c \
../ttm/ttm_page_alloc.c \
../ttm/ttm_tt.c \
$(DRM_TOPDIR)/drm_cache.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_drv.c \
$(DRM_TOPDIR)/drm_dp_i2c_helper.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
$(DRM_TOPDIR)/drm_global.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_modeset_lock.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_plane_helper.c \
$(DRM_TOPDIR)/drm_probe_helper.c \
$(DRM_TOPDIR)/drm_rect.c \
$(DRM_TOPDIR)/drm_stub.c \
$(DRM_TOPDIR)/drm_vma_manager.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
hmm.c \
r700_vs.c \
radeon_device.c \
atom.c \
atombios_crtc.c \
atombios_dp.c \
atombios_encoders.c \
atombios_i2c.c \
btc_dpm.c \
cayman_blit_shaders.c \
ci_dpm.c \
ci_smc.c \
cik.c \
cik_blit_shaders.c \
cik_sdma.c \
cypress_dpm.c \
dce3_1_afmt.c \
dce6_afmt.c \
evergreen.c \
evergreen_blit_shaders.c \
evergreen_cs.c \
evergreen_dma.c \
evergreen_blit_kms.c \
evergreen_hdmi.c \
kv_dpm.c \
kv_smc.c \
cayman_blit_shaders.c \
radeon_clocks.c \
atom.c \
ni.c \
ni_dma.c \
ni_dpm.c \
radeon_agp.c \
radeon_asic.c \
radeon_atombios.c \
108,74 → 71,45
radeon_bios.c \
radeon_combios.c \
radeon_connectors.c \
radeon_cs.c \
radeon_clocks.c \
radeon_display.c \
atombios_crtc.c \
atombios_dp.c \
atombios_encoders.c \
atombios_i2c.c \
radeon_encoders.c \
radeon_fence.c \
radeon_fb.c \
radeon_gart.c \
radeon_gem.c \
radeon_i2c.c \
radeon_ib.c \
radeon_irq_kms.c \
radeon_legacy_crtc.c \
radeon_legacy_encoders.c \
radeon_legacy_tv.c \
radeon_object.c \
radeon_pm.c \
radeon_display.c \
radeon_gart.c \
radeon_ring.c \
radeon_object_kos.c \
radeon_sa.c \
radeon_semaphore.c \
radeon_test.c \
radeon_ttm.c \
radeon_ucode.c \
radeon_uvd.c \
radeon_vce.c \
radeon_vm.c \
rdisplay_kms.c \
radeon_pm.c \
r100.c \
r200.c \
r300.c \
r420.c \
rv515.c \
rv730_dpm.c \
rv740_dpm.c \
r520.c \
r600.c \
r600_audio.c \
r600_blit_kms.c \
r600_blit_shaders.c \
r600_cs.c \
r600_dma.c \
r600_dpm.c \
r600_hdmi.c \
rs400.c \
rs600.c \
rs690.c \
rv6xx_dpm.c \
rs780_dpm.c \
rv770.c \
rv770_dma.c \
rv770_dpm.c \
rv770_smc.c \
radeon_fb.c \
rdisplay.c \
rdisplay_kms.c \
cmdline.c \
si.c \
si_blit_shaders.c \
si_dma.c \
si_dpm.c \
si_smc.c \
sumo_dpm.c \
sumo_smc.c \
trinity_dpm.c \
trinity_smc.c \
utils.c \
uvd_v1_0.c \
uvd_v2_2.c \
uvd_v3_1.c \
uvd_v4_2.c \
vce_v1_0.c \
vce_v2_0.c \
fwblob.asm
 
FW_BINS= \
218,8 → 152,6
NAME_OBJS = $(patsubst %.S, %.o, $(patsubst %.asm, %.o,\
$(patsubst %.c, %.o, $(NAME_SRC))))
 
 
 
all: $(NAME).dll
 
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) $(LIBPATH)/libcore.a $(LIBPATH)/libddk.a atikms.lds Makefile.lto
/drivers/video/drm/radeon/atom.c
725,7 → 725,7
SDEBUG(" target: 0x%04X\n", target);
if (execute) {
if (ctx->last_jump == (ctx->start + target)) {
cjiffies = jiffies;
cjiffies = GetTimerTicks();
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
if ((jiffies_to_msecs(cjiffies) > 5000)) {
734,11 → 734,11
}
} else {
/* jiffies wrap around we will just wait a little longer */
ctx->last_jump_jiffies = jiffies;
ctx->last_jump_jiffies = GetTimerTicks();
}
} else {
ctx->last_jump = ctx->start + target;
ctx->last_jump_jiffies = jiffies;
ctx->last_jump_jiffies = GetTimerTicks();
}
*ptr = ctx->start + target;
}
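
The hunk above swaps jiffies for KolibriOS's GetTimerTicks() in the AtomBIOS
jump-loop guard. The pattern, reduced to a sketch (the error path is assumed;
upstream logs the stuck jump and aborts the table):

    cjiffies = GetTimerTicks();
    if (ctx->last_jump == ctx->start + target) {
        if (time_after(cjiffies, ctx->last_jump_jiffies) &&
            jiffies_to_msecs(cjiffies - ctx->last_jump_jiffies) > 5000)
            return; /* stuck on one jump target for >5 s: bail out */
    } else {
        ctx->last_jump = ctx->start + target; /* new target: restart the clock */
        ctx->last_jump_jiffies = GetTimerTicks();
    }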
1220,8 → 1220,6
int r;
 
mutex_lock(&ctx->mutex);
/* reset data block */
ctx->data_block = 0;
/* reset reg block */
ctx->reg_block = 0;
/* reset fb window */
1228,9 → 1226,6
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
/* reset divmul */
ctx->divmul[0] = 0;
ctx->divmul[1] = 0;
r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
return r;
/drivers/video/drm/radeon/atombios.h
74,8 → 74,6
#define ATOM_PPLL2 1
#define ATOM_DCPLL 2
#define ATOM_PPLL0 2
#define ATOM_PPLL3 3
 
#define ATOM_EXT_PLL1 8
#define ATOM_EXT_PLL2 9
#define ATOM_EXT_CLOCK 10
261,7 → 259,7
USHORT AdjustDisplayPll; //Atomic Table, used by various SW components.
USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
USHORT SetUniphyInstance; //Atomic Table, only used by Bios
USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
273,7 → 271,7
USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
USHORT PatchMCSetting; //only used by BIOS
USHORT MC_SEQ_Control; //only used by BIOS
USHORT Gfx_Harvesting; //Atomic Table, Obsolete from Ry6xx, Now only used by BIOS for GFX harvesting
USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
USHORT EnableScaler; //Atomic Table, used only by Bios
USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
330,7 → 328,7
#define UNIPHYTransmitterControl DIG1TransmitterControl
#define LVTMATransmitterControl DIG2TransmitterControl
#define SetCRTC_DPM_State GetConditionalGoldenSetting
#define ASIC_StaticPwrMgtStatusChange SetUniphyInstance
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
#define HPDInterruptService ReadHWAssistedI2CStatus
#define EnableVGA_Access GetSCLKOverMCLKRatio
#define EnableYUV GetDispObjectInfo
340,8 → 338,8
#define TMDSAEncoderControl PatchMCSetting
#define LVDSEncoderControl MC_SEQ_Control
#define LCD1OutputControl HW_Misc_Operation
#define TV1OutputControl Gfx_Harvesting
 
 
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
480,11 → 478,11
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
{
#if ATOM_BIG_ENDIAN
ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
ULONG ulClock:24; //Input= target clock, output = actual clock
#else
ULONG ulClock:24; //Input= target clock, output = actual clock
ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
#endif
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
 
506,32 → 504,6
UCHAR ucReserved;
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
 
 
typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
ULONG ulReserved[2];
}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6;
 
//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK 0x0f
#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK 0x00
#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK 0x01
 
typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6
{
COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock; //Output Parameter: ucPostDiv=DFS divider
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter: PLL FB divider
UCHAR ucPllRefDiv; //Output Parameter: PLL ref divider
UCHAR ucPllPostDiv; //Output Parameter: PLL post divider
UCHAR ucPllCntlFlag; //Output Flags: control flag
UCHAR ucReserved;
}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6;
 
//ucPllCntlFlag
#define SPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
 
 
// ucInputFlag
#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
 
1711,12 → 1683,9
#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6 0x08 //for V6, the correct definition for 36bpp should be 2 for 36bpp(2:1)
#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6 0x04 //for V6, the correct definition for 30bpp should be 1 for 30bpp(5:4)
#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40
 
typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
{
2133,17 → 2102,6
}DVO_ENCODER_CONTROL_PARAMETERS_V3;
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
 
typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V1_4
{
USHORT usPixelClock;
UCHAR ucDVOConfig;
UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
UCHAR ucBitPerColor; //please refer to definition of PANEL_xBIT_PER_COLOR
UCHAR ucReseved[3];
}DVO_ENCODER_CONTROL_PARAMETERS_V1_4;
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 DVO_ENCODER_CONTROL_PARAMETERS_V1_4
 
 
//ucTableFormatRevision=1
//ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for
// bit1=0: non-coherent mode
2225,7 → 2183,7
USHORT usVoltageLevel; // real voltage level
}SET_VOLTAGE_PARAMETERS_V2;
 
// used by both SetVoltageTable v1.3 and v1.4
 
typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
{
UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
2242,10 → 2200,9
//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
#define ATOM_SET_VOLTAGE 0 //Set voltage Level
#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase, only for SVID/PVID regulator
#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used from SetVoltageTable v1.3
#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from virtual voltage ID, not used for SetVoltage v1.4
#define ATOM_GET_LEAKAGE_ID 8 //Get Leakage Voltage Id ( starting from SMU7x IP ), SetVoltage v1.4
#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase
#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3
#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from virtual voltage ID
 
// define virtual voltage id in usVoltageLevel
#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
2252,10 → 2209,6
#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
#define ATOM_VIRTUAL_VOLTAGE_ID4 0xff05
#define ATOM_VIRTUAL_VOLTAGE_ID5 0xff06
#define ATOM_VIRTUAL_VOLTAGE_ID6 0xff07
#define ATOM_VIRTUAL_VOLTAGE_ID7 0xff08
 
typedef struct _SET_VOLTAGE_PS_ALLOCATION
{
2292,36 → 2245,15
#define ATOM_GET_VOLTAGE_VID 0x00
#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
#define ATOM_GET_VOLTAGE_SVID2 0x07 //Get SVI2 Regulator Info
 
// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
 
// for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
 
// undefined power state
#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
 
// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
{
UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
ULONG ulSCLKFreq; // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
 
// New in GetVoltageInfo v1.2 ucVoltageMode
#define ATOM_GET_VOLTAGE_EVV_VOLTAGE 0x09
 
// New Added from CI Hawaii for EVV feature
typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
{
USHORT usVoltageLevel; // real voltage level in unit of mv
USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
ULONG ulReseved;
}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
 
/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
2696,8 → 2628,7
ULONG ulFirmwareRevision;
ULONG ulDefaultEngineClock; //In 10Khz unit
ULONG ulDefaultMemoryClock; //In 10Khz unit
ULONG ulSPLL_OutputFreq; //In 10Khz unit
ULONG ulGPUPLL_OutputFreq; //In 10Khz unit
ULONG ulReserved[2];
ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
3882,14 → 3813,6
UCHAR ucGPIO_ID;
}ATOM_GPIO_PIN_ASSIGNMENT;
 
//ucGPIO_ID pre-define id for multiple usage
//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC switching feature is enabled
#define PP_AC_DC_SWITCH_GPIO_PINID 60
//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enabled
#define VDDC_VRHOT_GPIO_PINID 61
//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled
#define VDDC_PCC_GPIO_PINID 62
 
typedef struct _ATOM_GPIO_PIN_LUT
{
ATOM_COMMON_TABLE_HEADER sHeader;
4151,7 → 4074,6
 
//usCaps
#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN 0x02
 
typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
{
4162,8 → 4084,7
UCHAR uc3DStereoPinId; // use for eDP panel
UCHAR ucRemoteDisplayConfig;
UCHAR uceDPToLVDSRxId;
UCHAR ucFixDPVoltageSwing; // usCaps[1]=1, this indicate DP_LANE_SET value
UCHAR Reserved[3]; // for potential expansion
UCHAR Reserved[4]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
 
//Related definitions, all records are different but they have a common header
4194,10 → 4115,10
#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
#define ATOM_ENCODER_CAP_RECORD_TYPE 20
#define ATOM_BRACKET_LAYOUT_RECORD_TYPE 21
 
 
//Must be updated when new record type is added,equal to that record definition!
#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_BRACKET_LAYOUT_RECORD_TYPE
#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
 
typedef struct _ATOM_I2C_RECORD
{
4422,31 → 4343,6
USHORT usReserved;
}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
 
typedef struct _ATOM_CONNECTOR_LAYOUT_INFO
{
USHORT usConnectorObjectId;
UCHAR ucConnectorType;
UCHAR ucPosition;
}ATOM_CONNECTOR_LAYOUT_INFO;
 
// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
#define CONNECTOR_TYPE_DVI_D 1
#define CONNECTOR_TYPE_DVI_I 2
#define CONNECTOR_TYPE_VGA 3
#define CONNECTOR_TYPE_HDMI 4
#define CONNECTOR_TYPE_DISPLAY_PORT 5
#define CONNECTOR_TYPE_MINI_DISPLAY_PORT 6
 
typedef struct _ATOM_BRACKET_LAYOUT_RECORD
{
ATOM_COMMON_RECORD_HEADER sheader;
UCHAR ucLength;
UCHAR ucWidth;
UCHAR ucConnNum;
UCHAR ucReserved;
ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1];
}ATOM_BRACKET_LAYOUT_RECORD;
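
// Parser sketch (assumption, not part of the header): because asConnInfo[] is a
// variable-length tail, the real record size depends on ucConnNum; the [1] in
// the declaration already accounts for the first entry (ucConnNum >= 1 assumed).
static USHORT example_bracket_record_size(const ATOM_BRACKET_LAYOUT_RECORD *rec)
{
    return (USHORT)(sizeof(ATOM_BRACKET_LAYOUT_RECORD) +
                    (rec->ucConnNum - 1) * sizeof(ATOM_CONNECTOR_LAYOUT_INFO));
}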
 
/****************************************************************************/
// ASIC voltage data table
/****************************************************************************/
4520,13 → 4416,6
#define VOLTAGE_CONTROL_ID_CHL822x 0x08
#define VOLTAGE_CONTROL_ID_VT1586M 0x09
#define VOLTAGE_CONTROL_ID_UP1637 0x0A
#define VOLTAGE_CONTROL_ID_CHL8214 0x0B
#define VOLTAGE_CONTROL_ID_UP1801 0x0C
#define VOLTAGE_CONTROL_ID_ST6788A 0x0D
#define VOLTAGE_CONTROL_ID_CHLIR3564SVI2 0x0E
#define VOLTAGE_CONTROL_ID_AD527x 0x0F
#define VOLTAGE_CONTROL_ID_NCP81022 0x10
#define VOLTAGE_CONTROL_ID_LTC2635 0x11
 
typedef struct _ATOM_VOLTAGE_OBJECT
{
4569,16 → 4458,6
USHORT usSize; //Size of Object
}ATOM_VOLTAGE_OBJECT_HEADER_V3;
 
// ATOM_VOLTAGE_OBJECT_HEADER_V3.ucVoltageMode
#define VOLTAGE_OBJ_GPIO_LUT 0 //VOLTAGE and GPIO Lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ 3 //VOLTAGE REGULATOR INIT sequence through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_PHASE_LUT 4 //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_SVID2 7 //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_EVV 8
#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT 0x12 //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
 
typedef struct _VOLTAGE_LUT_ENTRY_V2
{
ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
4594,7 → 4473,7
 
typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_VR_I2C_INIT_SEQ
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
UCHAR ucVoltageControlI2cLine;
UCHAR ucVoltageControlAddress;
4603,13 → 4482,9
VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
}ATOM_I2C_VOLTAGE_OBJECT_V3;
 
// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
#define VOLTAGE_DATA_ONE_BYTE 0
#define VOLTAGE_DATA_TWO_BYTE 1
 
typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucVoltageGpioCntlId; // default is 0, which indicates control through CG VID mode
UCHAR ucGpioEntryNum; // indicates the entry number of the Voltage/GPIO value lookup table
UCHAR ucPhaseDelay; // phase delay in unit of micro second
4620,7 → 4495,7
 
typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = 0x10/0x11/0x12
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucLeakageCntlId; // default is 0
UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
UCHAR ucReserved[2];
4628,27 → 4503,10
LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
 
 
typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_SVID2
// 14:7 - PSI0_VID
// 6 - PSI0_EN
// 5 - PSI1
// 4:2 - load line slope trim.
// 1:0 - offset trim,
USHORT usLoadLine_PSI;
// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
UCHAR ucSVDGpioId; //0~31 indicate GPIO0~31
UCHAR ucSVCGpioId; //0~31 indicate GPIO0~31
ULONG ulReserved;
}ATOM_SVID2_VOLTAGE_OBJECT_V3;
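
// Decoding sketch for usLoadLine_PSI, assuming plain shift-and-mask per the bit
// layout in the comments above; these helpers are illustrative, not driver API.
static UCHAR svid2_psi0_vid(USHORT v)      { return (v >> 7) & 0xFF; } // bits 14:7
static UCHAR svid2_psi0_en(USHORT v)       { return (v >> 6) & 0x01; } // bit 6
static UCHAR svid2_psi1(USHORT v)          { return (v >> 5) & 0x01; } // bit 5
static UCHAR svid2_loadline_trim(USHORT v) { return (v >> 2) & 0x07; } // bits 4:2
static UCHAR svid2_offset_trim(USHORT v)   { return v & 0x03; }        // bits 1:0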
 
typedef union _ATOM_VOLTAGE_OBJECT_V3{
ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
}ATOM_VOLTAGE_OBJECT_V3;
 
typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
4678,64 → 4536,6
ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
}ATOM_ASIC_PROFILING_INFO;
 
typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1
{
ATOM_COMMON_TABLE_HEADER asHeader;
UCHAR ucLeakageBinNum; // indicate the entry number of LeakageId/Voltage Lut table
USHORT usLeakageBinArrayOffset; // offset of USHORT Leakage Bin list array ( from lower LeakageId to higher)
 
UCHAR ucElbVDDC_Num;
USHORT usElbVDDC_IdArrayOffset; // offset of USHORT virtual VDDC voltage id ( 0xff01~0xff08 )
USHORT usElbVDDC_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
 
UCHAR ucElbVDDCI_Num;
USHORT usElbVDDCI_IdArrayOffset; // offset of USHORT virtual VDDCI voltage id ( 0xff01~0xff08 )
USHORT usElbVDDCI_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
}ATOM_ASIC_PROFILING_INFO_V2_1;
 
typedef struct _ATOM_ASIC_PROFILING_INFO_V3_1
{
ATOM_COMMON_TABLE_HEADER asHeader;
ULONG ulEvvDerateTdp;
ULONG ulEvvDerateTdc;
ULONG ulBoardCoreTemp;
ULONG ulMaxVddc;
ULONG ulMinVddc;
ULONG ulLoadLineSlop;
ULONG ulLeakageTemp;
ULONG ulLeakageVoltage;
ULONG ulCACmEncodeRange;
ULONG ulCACmEncodeAverage;
ULONG ulCACbEncodeRange;
ULONG ulCACbEncodeAverage;
ULONG ulKt_bEncodeRange;
ULONG ulKt_bEncodeAverage;
ULONG ulKv_mEncodeRange;
ULONG ulKv_mEncodeAverage;
ULONG ulKv_bEncodeRange;
ULONG ulKv_bEncodeAverage;
ULONG ulLkgEncodeLn_MaxDivMin;
ULONG ulLkgEncodeMin;
ULONG ulEfuseLogisticAlpha;
USHORT usPowerDpm0;
USHORT usCurrentDpm0;
USHORT usPowerDpm1;
USHORT usCurrentDpm1;
USHORT usPowerDpm2;
USHORT usCurrentDpm2;
USHORT usPowerDpm3;
USHORT usCurrentDpm3;
USHORT usPowerDpm4;
USHORT usCurrentDpm4;
USHORT usPowerDpm5;
USHORT usCurrentDpm5;
USHORT usPowerDpm6;
USHORT usCurrentDpm6;
USHORT usPowerDpm7;
USHORT usCurrentDpm7;
}ATOM_ASIC_PROFILING_INFO_V3_1;
 
 
typedef struct _ATOM_POWER_SOURCE_OBJECT
{
UCHAR ucPwrSrcId; // Power source
4852,8 → 4652,6
#define SYS_INFO_LVDSMISC__888_BPC 0x04
#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
// new since Trinity
#define SYS_INFO_LVDSMISC__TRAVIS_LVDS_VOL_OVERRIDE_EN 0x20
 
// not used any more
#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
4954,29 → 4752,6
ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
ULONG ulPowerplayTable[128];
}ATOM_FUSION_SYSTEM_INFO_V1;
 
 
typedef struct _ATOM_TDP_CONFIG_BITS
{
#if ATOM_BIG_ENDIAN
ULONG uReserved:2;
ULONG uTDP_Value:14; // Original TDP value in tens of milli watts
ULONG uCTDP_Value:14; // Override value in tens of milli watts
ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
#else
ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
ULONG uCTDP_Value:14; // Override value in tens of milli watts
ULONG uTDP_Value:14; // Original TDP value in tens of milli watts
ULONG uReserved:2;
#endif
}ATOM_TDP_CONFIG_BITS;
 
typedef union _ATOM_TDP_CONFIG
{
ATOM_TDP_CONFIG_BITS TDP_config;
ULONG TDP_config_all;
}ATOM_TDP_CONFIG;
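
// Sketch of the uCTDP_Enable convention spelled out in the inline comments
// above (2 = cTDP raises the limit, 1 = cTDP lowers it, 0 = no override);
// a hypothetical helper, not part of the header:
static ULONG example_ctdp_enable(ULONG ctdp_value, ULONG tdp_value)
{
    if (ctdp_value > tdp_value)
        return 2;
    return (ctdp_value < tdp_value) ? 1 : 0;
}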
 
/**********************************************************************************************************************
ATOM_FUSION_SYSTEM_INFO_V1 Description
sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
5009,8 → 4784,7
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
UCHAR strVBIOSMsg[40];
ATOM_TDP_CONFIG asTdpConfig;
ULONG ulReserved[19];
ULONG ulReserved[20];
ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
ULONG ulGMCRestoreResetTime;
ULONG ulMinimumNClk;
5035,7 → 4809,7
USHORT GnbTdpLimit;
USHORT usMaxLVDSPclkFreqInSingleLink;
UCHAR ucLvdsMisc;
UCHAR ucTravisLVDSVolAdjust;
UCHAR ucLVDSReserved;
UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
5043,7 → 4817,7
UCHAR ucLVDSOffToOnDelay_in4Ms;
UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
UCHAR ucMinAllowedBL_Level;
UCHAR ucLVDSReserved1;
ULONG ulLCDBitDepthControlVal;
ULONG ulNbpStateMemclkFreq[4];
USHORT usNBP2Voltage;
5072,7 → 4846,6
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
 
/**********************************************************************************************************************
ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
5172,9 → 4945,6
[bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
[bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
[bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
[bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
ucTravisLVDSVolAdjust When ucLVDSMisc[5]=1, the platform SBIOS wants to overwrite TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
value to program the Travis register LVDS_CTRL_4
ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
=0 means use VBIOS default, which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
5194,241 → 4964,18
=0 means to use VBIOS default delay which is 125 ( 500ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active.
ucLVDSPwrOnVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active.
=0 means to use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
ucLVDSPwrOffBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
=0 means to use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is a customer platform specific parameter. By default it is 0.
 
ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10kHz in different NB P-states.
 
**********************************************************************************************************************/
 
// this IntegrateSystemInfoTable is used for Kaveri & Kabini APU
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8
{
ATOM_COMMON_TABLE_HEADER sHeader;
ULONG ulBootUpEngineClock;
ULONG ulDentistVCOFreq;
ULONG ulBootUpUMAClock;
ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
ULONG ulBootUpReqDisplayVector;
ULONG ulVBIOSMisc;
ULONG ulGPUCapInfo;
ULONG ulDISP_CLK2Freq;
USHORT usRequestedPWMFreqInHz;
UCHAR ucHtcTmpLmt;
UCHAR ucHtcHystLmt;
ULONG ulReserved2;
ULONG ulSystemConfig;
ULONG ulCPUCapInfo;
ULONG ulReserved3;
USHORT usGPUReservedSysMemSize;
USHORT usExtDispConnInfoOffset;
USHORT usPanelRefreshRateRange;
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
UCHAR strVBIOSMsg[40];
ATOM_TDP_CONFIG asTdpConfig;
ULONG ulReserved[19];
ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
ULONG ulGMCRestoreResetTime;
ULONG ulReserved4;
ULONG ulIdleNClk;
ULONG ulDDR_DLL_PowerUpTime;
ULONG ulDDR_PLL_PowerUpTime;
USHORT usPCIEClkSSPercentage;
USHORT usPCIEClkSSType;
USHORT usLvdsSSPercentage;
USHORT usLvdsSSpreadRateIn10Hz;
USHORT usHDMISSPercentage;
USHORT usHDMISSpreadRateIn10Hz;
USHORT usDVISSPercentage;
USHORT usDVISSpreadRateIn10Hz;
ULONG ulGPUReservedSysMemBaseAddrLo;
ULONG ulGPUReservedSysMemBaseAddrHi;
ULONG ulReserved5[3];
USHORT usMaxLVDSPclkFreqInSingleLink;
UCHAR ucLvdsMisc;
UCHAR ucTravisLVDSVolAdjust;
UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
UCHAR ucLVDSOffToOnDelay_in4Ms;
UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
UCHAR ucMinAllowedBL_Level;
ULONG ulLCDBitDepthControlVal;
ULONG ulNbpStateMemclkFreq[4];
ULONG ulReserved6;
ULONG ulNbpStateNClkFreq[4];
USHORT usNBPStateVoltage[4];
USHORT usBootUpNBVoltage;
USHORT usReserved2;
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V1_8;
 
/**********************************************************************************************************************
ATOM_INTEGRATED_SYSTEM_INFO_V1_8 Description
ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, VBIOS uses the pre-defined bootup engine clock
ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
sDISPCLK_Voltage: Report Display clock frequency requirement on GNB voltage(up to 4 voltage levels).
ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
ATOM_DEVICE_CRT1_SUPPORT 0x0001
ATOM_DEVICE_DFP1_SUPPORT 0x0008
ATOM_DEVICE_DFP6_SUPPORT 0x0040
ATOM_DEVICE_DFP2_SUPPORT 0x0080
ATOM_DEVICE_DFP3_SUPPORT 0x0200
ATOM_DEVICE_DFP4_SUPPORT 0x0400
ATOM_DEVICE_DFP5_SUPPORT 0x0800
ATOM_DEVICE_LCD1_SUPPORT 0x0002
 
ulVBIOSMisc: Miscellaneous flags for VBIOS requirement and interface
bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
=1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
=1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
=1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
bit[3]=0: VBIOS fast boot is disabled
=1: VBIOS fast boot is enabled. ( VBIOS skips display device detection on every set mode if the LCD panel is connected and the lid is open)
 
ulGPUCapInfo: bit[0~2]= Reserved
bit[3]=0: Enable AUX HW mode detection logic
=1: Disable AUX HW mode detection logic
bit[4]=0: Disable DFS bypass feature
=1: Enable DFS bypass feature
 
usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
1. SW uses the GPU BL PWM output to control the BL, in this case, this non-zero frequency determines what freq GPU should use;
VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
Changing BL using VBIOS function is functional in both driver and non-driver present environment;
and enabling VariBri under the driver environment from PP table is optional.
 
2. SW uses other means to control BL (like DPCD), this non-zero frequency serves as a flag only indicating
that BL control from GPU is expected.
VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
it's per platform
and enabling VariBri under the driver environment from PP table is optional.
 
ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. Threshold on value to enter HTC_active state.
ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
To calculate threshold off value to exit HTC_active state, which is Threshold on value minus ucHtcHystLmt.
 
ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
=1: PCIE Power Gating Enabled
Bit[1]=0: DDR-DLL shut-down feature disabled.
1: DDR-DLL shut-down feature enabled.
Bit[2]=0: DDR-PLL Power down feature disabled.
1: DDR-PLL Power down feature enabled.
Bit[3]=0: GNB DPM is disabled
=1: GNB DPM is enabled
ulCPUCapInfo: TBD
 
usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
to indicate a range.
SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
 
ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3;=5:GDDR5; [7:4] is reserved.
ucUMAChannelNumber: System memory channel numbers.
 
strVBIOSMsg[40]: VBIOS boot up customized message string
 
sAvail_SCLK[5]: Array providing the available list of SCLK and corresponding voltage, ordered from low to high
 
ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
ulIdleNClk: NCLK speed while memory runs in self-refresh state, used to calculate self-refresh latency. Unit in 10kHz.
ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
 
usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 mean 1%.
usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
 
usGPUReservedSysMemSize: Reserved system memory size for ACP engine in APU GNB, units in MB. 0/2/4MB based on CMOS options, current default could be 0MB. KV only, not on KB.
ulGPUReservedSysMemBaseAddrLo: Low 32 bits base address to the reserved system memory.
ulGPUReservedSysMemBaseAddrHi: High 32 bits base address to the reserved system memory.
 
usMaxLVDSPclkFreqInSingleLink: Max pixel clock for an LVDS panel in single link; if =0, VBIOS uses the default threshold, currently 85MHz
ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
[bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swap, =1: lower link and upper link are swapped
[bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
[bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
[bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
[bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite the Travis register LVDS_CTRL_4
ucTravisLVDSVolAdjust When ucLVDSMisc[5]=1, the platform SBIOS wants to overwrite TravisLVDSVoltage. VBIOS will then use the ucTravisLVDSVolAdjust
value to program the Travis register LVDS_CTRL_4
ucLVDSPwrOnSeqDIGONtoDE_in4Ms:
LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
=0 means use VBIOS default, which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
ucLVDSPwrOnDEtoVARY_BL_in4Ms:
LVDS power up sequence time in unit of 4ms, time delay from DE ( data enable ) active to Vary Brightness enable signal active ( VARY_BL ).
=0 means use VBIOS default, which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
ucLVDSPwrOffVARY_BLtoDE_in4Ms:
LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
=0 means use VBIOS default delay, which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
ucLVDSPwrOffDEtoDIGON_in4Ms:
LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL) off to data enable ( DE ) signal off.
=0 means use VBIOS default, which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
ucLVDSOffToOnDelay_in4Ms:
LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
=0 means to use VBIOS default delay which is 125 ( 500ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active.
=0 means to use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
=0 means to use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is a customer platform specific parameter. By default it is 0.
 
ulLCDBitDepthControlVal: GPU display control encoder bit dither control setting, used to program register mmFMT_BIT_DEPTH_CONTROL
 
ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10kHz in different NB P-States (P0, P1, P2 & P3).
ulNbpStateNClkFreq[4]: NB P-State NClk frequency in different NB P-State
usNBPStateVoltage[4]: NB P-State (P0/P1 & P2/P3) voltage; NBP3 refers to the lowest voltage
usBootUpNBVoltage: NB P-State voltage during boot up before driver loaded
sExtDispConnInfo: Display connector information table provided to VBIOS
 
**********************************************************************************************************************/
 
// this Table is used for Kaveri/Kabini APU
typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
{
ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
ULONG ulPowerplayTable[128]; // Update comments here to link new powerplay table definition structure
}ATOM_FUSION_SYSTEM_INFO_V2;
 
 
/**************************************************************************/
// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
//Memory SS Info Table
5489,14 → 5036,12
#define ASIC_INTERNAL_SS_ON_DCPLL 8
#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
#define ASIC_INTERNAL_VCE_SS 10
#define ASIC_INTERNAL_GPUPLL_SS 11
 
 
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
{
ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz
//For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
USHORT usSpreadSpectrumPercentage; //in unit of 0.01% or 0.001%, decided by ucSpreadSpectrumMode bit4
USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
UCHAR ucClockIndication; //Indicate which clock source needs SS
UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
5534,11 → 5079,6
UCHAR ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT_V3;
 
//ATOM_ASIC_SS_ASSIGNMENT_V3.ucSpreadSpectrumMode
#define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01
#define SS_MODE_V3_EXTERNAL_SS_MASK 0x02
#define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10
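
// Normalization sketch (illustrative helper; the raw fields come from the V3
// assignment entry): when the DIV_BY_1000 bit is set, the percentage is stored
// in 0.001% units, so convert back to the usual 0.01% units:
static USHORT example_ss_percentage_in_0_01pct(USHORT raw_pct, UCHAR mode)
{
    if (mode & SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
        return raw_pct / 10;  // 0.001% units -> 0.01% units
    return raw_pct;
}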
 
typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
{
ATOM_COMMON_TABLE_HEADER sHeader;
5907,8 → 5447,6
#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
#define ATOM_S7_ASIC_INIT_COMPLETEb1 0x02
#define ATOM_S7_ASIC_INIT_COMPLETE_MASK 0x00000200
#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
 
#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
6181,7 → 5719,6
#define INDIRECT_IO_PCIE 3
#define INDIRECT_IO_PCIEP 4
#define INDIRECT_IO_NBMISC 5
#define INDIRECT_IO_SMU 5
 
#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ
#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE
6193,8 → 5730,6
#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE
#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
#define INDIRECT_IO_SMU_READ INDIRECT_IO_SMU | INDIRECT_READ
#define INDIRECT_IO_SMU_WRITE INDIRECT_IO_SMU | INDIRECT_WRITE
 
typedef struct _ATOM_OEM_INFO
{
6340,10 → 5875,8
#define _64Mx32 0x43
#define _128Mx8 0x51
#define _128Mx16 0x52
#define _128Mx32 0x53
#define _256Mx8 0x61
#define _256Mx16 0x62
#define _512Mx8 0x71
 
#define SAMSUNG 0x1
#define INFINEON 0x2
6360,9 → 5893,7
#define PROMOS MOSEL
#define KRETON INFINEON
#define ELIXIR NANYA
#define MEZZA ELPIDA
 
 
/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
 
#define UCODE_ROM_START_ADDRESS 0x1b800
7090,15 → 6621,10
UCHAR ucMaxActiveDispEngineNum;
UCHAR ucMaxPPLLNum;
UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
UCHAR ucDispCaps;
UCHAR ucReserved[2];
UCHAR ucReserved[3];
ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
}ATOM_DISP_OUT_INFO_V3;
 
//ucDispCaps
#define DISPLAY_CAPS__DP_PCLK_FROM_PPLL 0x01
#define DISPLAY_CAPS__FORCE_DISPDEV_CONNECTED 0x02
 
typedef enum CORE_REF_CLK_SOURCE{
CLOCK_SRC_XTALIN=0,
CLOCK_SRC_XO_IN=1,
7303,17 → 6829,6
USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
}DIG_TRANSMITTER_INFO_HEADER_V3_1;
 
typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_2{
ATOM_COMMON_TABLE_HEADER sHeader;
USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
USHORT usDPSSRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info
USHORT usDPSSSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings
}DIG_TRANSMITTER_INFO_HEADER_V3_2;
 
typedef struct _CLOCK_CONDITION_REGESTER_INFO{
USHORT usRegisterIndex;
UCHAR ucStartBit;
7337,12 → 6852,6
ULONG ulRegVal;
}PHY_CONDITION_REG_VAL;
 
typedef struct _PHY_CONDITION_REG_VAL_V2{
ULONG ulCondition;
UCHAR ucCondition2;
ULONG ulRegVal;
}PHY_CONDITION_REG_VAL_V2;
 
typedef struct _PHY_CONDITION_REG_INFO{
USHORT usRegIndex;
USHORT usSize;
7349,12 → 6858,6
PHY_CONDITION_REG_VAL asRegVal[1];
}PHY_CONDITION_REG_INFO;
 
typedef struct _PHY_CONDITION_REG_INFO_V2{
USHORT usRegIndex;
USHORT usSize;
PHY_CONDITION_REG_VAL_V2 asRegVal[1];
}PHY_CONDITION_REG_INFO_V2;
 
typedef struct _PHY_ANALOG_SETTING_INFO{
UCHAR ucEncodeMode;
UCHAR ucPhySel;
7362,25 → 6865,6
PHY_CONDITION_REG_INFO asAnalogSetting[1];
}PHY_ANALOG_SETTING_INFO;
 
typedef struct _PHY_ANALOG_SETTING_INFO_V2{
UCHAR ucEncodeMode;
UCHAR ucPhySel;
USHORT usSize;
PHY_CONDITION_REG_INFO_V2 asAnalogSetting[1];
}PHY_ANALOG_SETTING_INFO_V2;
 
typedef struct _GFX_HAVESTING_PARAMETERS {
UCHAR ucGfxBlkId; //GFX blk id to be harvested, like CU, RB or PRIM
UCHAR ucReserved; //reserved
UCHAR ucActiveUnitNumPerSH; //requested active CU/RB/PRIM number per shader array
UCHAR ucMaxUnitNumPerSH; //max CU/RB/PRIM number per shader array
} GFX_HAVESTING_PARAMETERS;
 
//ucGfxBlkId
#define GFX_HARVESTING_CU_ID 0
#define GFX_HARVESTING_RB_ID 1
#define GFX_HARVESTING_PRIM_ID 2
 
/****************************************************************************/
//Portion VI: Definitions for VBIOS MC scratch registers that the driver uses
/****************************************************************************/
7391,17 → 6875,8
#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000
#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000
#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000
#define MC_MISC0__MEMORY_TYPE__HBM 0x60000000
#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000
 
#define ATOM_MEM_TYPE_DDR_STRING "DDR"
#define ATOM_MEM_TYPE_DDR2_STRING "DDR2"
#define ATOM_MEM_TYPE_GDDR3_STRING "GDDR3"
#define ATOM_MEM_TYPE_GDDR4_STRING "GDDR4"
#define ATOM_MEM_TYPE_GDDR5_STRING "GDDR5"
#define ATOM_MEM_TYPE_HBM_STRING "HBM"
#define ATOM_MEM_TYPE_DDR3_STRING "DDR3"
 
/****************************************************************************/
//Portion VI: Definitions being obsolete
/****************************************************************************/
7764,7 → 7239,566
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
}ATOM_POWERPLAY_INFO_V3;
 
/* New PPlib */
/**************************************************************************/
typedef struct _ATOM_PPLIB_THERMALCONTROLLER
 
{
UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
UCHAR ucI2cLine; // as interpreted by DAL I2C
UCHAR ucI2cAddress;
UCHAR ucFanParameters; // Fan Control Parameters.
UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
UCHAR ucReserved; // ----
UCHAR ucFlags; // to be defined
} ATOM_PPLIB_THERMALCONTROLLER;
 
#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
 
#define ATOM_PP_THERMALCONTROLLER_NONE 0
#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_LM64 5
#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
#define ATOM_PP_THERMALCONTROLLER_RV770 8
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
#define ATOM_PP_THERMALCONTROLLER_LM96163 17
 
// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
// We probably should reserve the bit 0x80 for this use.
// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
// The driver can pick the correct internal controller based on the ASIC.
 
#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
 
typedef struct _ATOM_PPLIB_STATE
{
UCHAR ucNonClockStateIndex;
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
 
 
typedef struct _ATOM_PPLIB_FANTABLE
{
UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
UCHAR ucTHyst; // Temperature hysteresis. Integer.
USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
USHORT usTMed; // The middle temperature where we change slopes.
USHORT usTHigh; // The high point above TMed for adjusting the second slope.
USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
USHORT usPWMMed; // The PWM value (in percent) at TMed.
USHORT usPWMHigh; // The PWM value at THigh.
} ATOM_PPLIB_FANTABLE;
 
typedef struct _ATOM_PPLIB_FANTABLE2
{
ATOM_PPLIB_FANTABLE basicTable;
USHORT usTMax; // The max temperature
} ATOM_PPLIB_FANTABLE2;
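
// Sketch of the fan curve these fields encode (hypothetical helper, assuming
// piecewise-linear PWM between (usTMin, usPWMMin), (usTMed, usPWMMed) and
// (usTHigh, usPWMHigh), with usTMin < usTMed < usTHigh); temperatures are in
// 0.01 centigrades and PWM values in 0.01 %:
static USHORT example_fan_pwm(const ATOM_PPLIB_FANTABLE *t, USHORT temp)
{
    if (temp <= t->usTMin)
        return t->usPWMMin;
    if (temp <= t->usTMed)
        return (USHORT)(t->usPWMMin + (ULONG)(temp - t->usTMin) *
                        (t->usPWMMed - t->usPWMMin) / (t->usTMed - t->usTMin));
    if (temp <= t->usTHigh)
        return (USHORT)(t->usPWMMed + (ULONG)(temp - t->usTMed) *
                        (t->usPWMHigh - t->usPWMMed) / (t->usTHigh - t->usTMed));
    return t->usPWMHigh;
}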
 
typedef struct _ATOM_PPLIB_EXTENDEDHEADER
{
USHORT usSize;
ULONG ulMaxEngineClock; // For Overdrive.
ULONG ulMaxMemoryClock; // For Overdrive.
// Add extra system parameters here, always adjust size to include all fields.
USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
} ATOM_PPLIB_EXTENDEDHEADER;
 
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Whether the driver controls VDDCI independently from VDDC.
#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Whether the driver supports the BACO state.
 
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
 
UCHAR ucDataRevision;
 
UCHAR ucNumStates;
UCHAR ucStateEntrySize;
UCHAR ucClockInfoSize;
UCHAR ucNonClockSize;
 
// offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
USHORT usStateArrayOffset;
 
// offset from start of this table to array of ASIC-specific structures,
// currently ATOM_PPLIB_CLOCK_INFO.
USHORT usClockInfoArrayOffset;
 
// offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
USHORT usNonClockInfoArrayOffset;
 
USHORT usBackbiasTime; // in microseconds
USHORT usVoltageTime; // in microseconds
USHORT usTableSize; //the size of this structure, or the extended structure
 
ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
 
ATOM_PPLIB_THERMALCONTROLLER sThermalController;
 
USHORT usBootClockInfoOffset;
USHORT usBootNonClockInfoOffset;
 
} ATOM_PPLIB_POWERPLAYTABLE;
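
// Lookup sketch (hypothetical helper; the radeon dpm code does the equivalent):
// all offsets in the table are relative to the start of the table itself, each
// state entry is ucStateEntrySize bytes, and le16_to_cpu is assumed from the
// kernel byte-order headers:
static const ATOM_PPLIB_STATE *
example_get_state(const ATOM_PPLIB_POWERPLAYTABLE *ppt, UCHAR idx)
{
    const UCHAR *base = (const UCHAR *)ppt;
    return (const ATOM_PPLIB_STATE *)
        (base + le16_to_cpu(ppt->usStateArrayOffset) + idx * ppt->ucStateEntrySize);
}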
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
{
ATOM_PPLIB_POWERPLAYTABLE basicTable;
UCHAR ucNumCustomThermalPolicy;
USHORT usCustomThermalPolicyArrayOffset;
}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
{
ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
USHORT usFormatID; // To be used ONLY by PPGen.
USHORT usFanTableOffset;
USHORT usExtendendedHeaderOffset;
} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
{
ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
ULONG ulGoldenPPID; // PPGen use only
ULONG ulGoldenRevision; // PPGen use only
USHORT usVddcDependencyOnSCLKOffset;
USHORT usVddciDependencyOnMCLKOffset;
USHORT usVddcDependencyOnMCLKOffset;
USHORT usMaxClockVoltageOnDCOffset;
USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
USHORT usReserved;
} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
{
ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
ULONG ulTDPLimit;
ULONG ulNearTDPLimit;
ULONG ulSQRampingThreshold;
USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
USHORT usTDPODLimit;
USHORT usLoadLineSlope; // in milliOhms * 100
} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
 
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
// 2, 4, 6, 7 are reserved
 
#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
 
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
 
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
 
// 0 is 2.5Gb/s, 1 is 5Gb/s
#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
 
// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
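
// Extraction sketch for these two fields of ulCapsAndSettings (illustrative
// helpers): the speed bit encodes 0 = 2.5Gb/s / 1 = 5Gb/s, and the width field
// stores lanes - 1, per the comments above.
static int example_pcie_link_speed(ULONG caps)
{
    return (int)((caps & ATOM_PPLIB_PCIE_LINK_SPEED_MASK) >> ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT);
}
static int example_pcie_link_lanes(ULONG caps)
{
    return (int)((caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
}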
 
// lookup into reduced refresh-rate table
#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
 
#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
// 2-15 TBD as needed.
 
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
 
#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
 
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
 
//memory related flags
#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
 
//M3 Arb //2bits, current 3 sets of parameters in total
#define ATOM_PPLIB_M3ARB_MASK 0x00060000
#define ATOM_PPLIB_M3ARB_SHIFT 17
 
#define ATOM_PPLIB_ENABLE_DRR 0x00080000
 
// remaining 16 bits are reserved
typedef struct _ATOM_PPLIB_THERMAL_STATE
{
UCHAR ucMinTemperature;
UCHAR ucMaxTemperature;
UCHAR ucThermalAction;
}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
 
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
typedef struct _ATOM_PPLIB_NONCLOCK_INFO
{
USHORT usClassification;
UCHAR ucMinTemperature;
UCHAR ucMaxTemperature;
ULONG ulCapsAndSettings;
UCHAR ucRequiredPower;
USHORT usClassification2;
ULONG ulVCLK;
ULONG ulDCLK;
UCHAR ucUnused[5];
} ATOM_PPLIB_NONCLOCK_INFO;
 
// Contained in an array starting at the offset
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
{
USHORT usEngineClockLow;
UCHAR ucEngineClockHigh;
 
USHORT usMemoryClockLow;
UCHAR ucMemoryClockHigh;
 
USHORT usVDDC;
USHORT usUnused1;
USHORT usUnused2;
 
ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
 
} ATOM_PPLIB_R600_CLOCK_INFO;
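
// The clocks are split into a 16-bit low word and an 8-bit high byte; a reader
// reassembles the 24-bit value, in 10kHz units, like this (illustrative helper;
// le16_to_cpu is assumed from the kernel byte-order headers):
static ULONG example_r600_engine_clock(const ATOM_PPLIB_R600_CLOCK_INFO *ci)
{
    return ((ULONG)ci->ucEngineClockHigh << 16) | le16_to_cpu(ci->usEngineClockLow);
}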
 
// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
 
typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
{
USHORT usEngineClockLow;
UCHAR ucEngineClockHigh;
 
USHORT usMemoryClockLow;
UCHAR ucMemoryClockHigh;
 
USHORT usVDDC;
USHORT usVDDCI;
USHORT usUnused;
 
ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
 
} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
 
typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
{
USHORT usEngineClockLow;
UCHAR ucEngineClockHigh;
 
USHORT usMemoryClockLow;
UCHAR ucMemoryClockHigh;
 
USHORT usVDDC;
USHORT usVDDCI;
UCHAR ucPCIEGen;
UCHAR ucUnused1;
 
ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
 
} ATOM_PPLIB_SI_CLOCK_INFO;
 
 
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 
{
USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
UCHAR ucLowEngineClockHigh;
USHORT usHighEngineClockLow; // High Engine clock in MHz.
UCHAR ucHighEngineClockHigh;
USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
UCHAR ucMemoryClockHigh; // Currently unused.
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could be bigger due to display BW requirements.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
 
#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
 
#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
 
#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
 
typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is 10kHz
UCHAR ucEngineClockHigh; //clock frequency >> 16.
UCHAR vddcIndex; //2-bit vddc index;
USHORT tdpLimit;
//please initialize to 0
USHORT rsv1;
//please initialize to 0s
ULONG rsv2[2];
}ATOM_PPLIB_SUMO_CLOCK_INFO;
 
 
 
typedef struct _ATOM_PPLIB_STATE_V2
{
//number of valid dpm levels in this state; Driver uses it to calculate the whole
//size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
UCHAR ucNumDPMLevels;
//an index into the array of nonClockInfos
UCHAR nonClockInfoIndex;
/**
* Driver will read the first ucNumDPMLevels in this array
*/
UCHAR clockInfoIndex[1];
} ATOM_PPLIB_STATE_V2;
 
typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
ATOM_PPLIB_STATE_V2 states[1];
}StateArray;
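
// Because ATOM_PPLIB_STATE_V2 ends in a variable-length clockInfoIndex[] array,
// states[] cannot be indexed directly; each entry is stepped over using the
// sizeof() formula given in the comment above (sketch only):
static const ATOM_PPLIB_STATE_V2 *
example_next_state_v2(const ATOM_PPLIB_STATE_V2 *s)
{
    unsigned int sz = sizeof(ATOM_PPLIB_STATE_V2) +
                      (s->ucNumDPMLevels - 1) * sizeof(UCHAR);
    return (const ATOM_PPLIB_STATE_V2 *)((const UCHAR *)s + sz);
}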
 
 
typedef struct _ClockInfoArray{
//how many clock levels we have
UCHAR ucNumEntries;
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
UCHAR clockInfo[1];
}ClockInfoArray;
 
typedef struct _NonClockInfoArray{
 
//how many non-clock levels we have; normally the same as the number of states
UCHAR ucNumEntries;
//sizeof(ATOM_PPLIB_NONCLOCK_INFO)
UCHAR ucEntrySize;
ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
}NonClockInfoArray;
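
// ClockInfoArray and NonClockInfoArray record their own ucEntrySize because the
// entry layout grew across table revisions (see ATOM_PPLIB_NONCLOCKINFO_VER1/2
// above), so indexing must use the stored stride rather than sizeof() (sketch only):
static const ATOM_PPLIB_NONCLOCK_INFO *
example_non_clock_info(const NonClockInfoArray *a, UCHAR idx)
{
    return (const ATOM_PPLIB_NONCLOCK_INFO *)
        ((const UCHAR *)a->nonClockInfo + idx * a->ucEntrySize);
}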
 
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
{
USHORT usClockLow;
UCHAR ucClockHigh;
USHORT usVoltage;
}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
 
typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
 
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
{
USHORT usSclkLow;
UCHAR ucSclkHigh;
USHORT usMclkLow;
UCHAR ucMclkHigh;
USHORT usVddc;
USHORT usVddci;
}ATOM_PPLIB_Clock_Voltage_Limit_Record;
 
typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_Clock_Voltage_Limit_Table;
 
typedef struct _ATOM_PPLIB_CAC_Leakage_Record
{
USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations
ULONG ulLeakageValue;
}ATOM_PPLIB_CAC_Leakage_Record;
 
typedef struct _ATOM_PPLIB_CAC_Leakage_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_CAC_Leakage_Table;
 
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
{
USHORT usVoltage;
USHORT usSclkLow;
UCHAR ucSclkHigh;
USHORT usMclkLow;
UCHAR ucMclkHigh;
}ATOM_PPLIB_PhaseSheddingLimits_Record;
 
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_PhaseSheddingLimits_Table;
 
typedef struct _VCEClockInfo{
USHORT usEVClkLow;
UCHAR ucEVClkHigh;
USHORT usECClkLow;
UCHAR ucECClkHigh;
}VCEClockInfo;
 
typedef struct _VCEClockInfoArray{
UCHAR ucNumEntries;
VCEClockInfo entries[1];
}VCEClockInfoArray;
 
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
{
USHORT usVoltage;
UCHAR ucVCEClockInfoIndex;
}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
 
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
{
UCHAR numEntries;
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
 
typedef struct _ATOM_PPLIB_VCE_State_Record
{
UCHAR ucVCEClockInfoIndex;
UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
}ATOM_PPLIB_VCE_State_Record;
 
typedef struct _ATOM_PPLIB_VCE_State_Table
{
UCHAR numEntries;
ATOM_PPLIB_VCE_State_Record entries[1];
}ATOM_PPLIB_VCE_State_Table;
 
 
typedef struct _ATOM_PPLIB_VCE_Table
{
UCHAR revid;
// VCEClockInfoArray array;
// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
// ATOM_PPLIB_VCE_State_Table states;
}ATOM_PPLIB_VCE_Table;
 
 
typedef struct _UVDClockInfo{
USHORT usVClkLow;
UCHAR ucVClkHigh;
USHORT usDClkLow;
UCHAR ucDClkHigh;
}UVDClockInfo;
 
typedef struct _UVDClockInfoArray{
UCHAR ucNumEntries;
UVDClockInfo entries[1];
}UVDClockInfoArray;
 
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
{
USHORT usVoltage;
UCHAR ucUVDClockInfoIndex;
}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
 
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
{
UCHAR numEntries;
ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
 
typedef struct _ATOM_PPLIB_UVD_State_Record
{
UCHAR ucUVDClockInfoIndex;
UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
}ATOM_PPLIB_UVD_State_Record;
 
typedef struct _ATOM_PPLIB_UVD_State_Table
{
UCHAR numEntries;
ATOM_PPLIB_UVD_State_Record entries[1];
}ATOM_PPLIB_UVD_State_Table;
 
 
typedef struct _ATOM_PPLIB_UVD_Table
{
UCHAR revid;
// UVDClockInfoArray array;
// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
// ATOM_PPLIB_UVD_State_Table states;
}ATOM_PPLIB_UVD_Table;
 
/**************************************************************************/
 
 
// Following definitions are for compatibility issue in different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
#define Object_Info Object_Header
7976,6 → 8010,3
 
 
#endif /* _ATOMBIOS_H */
 
#include "pptable.h"
 
/drivers/video/drm/radeon/atombios_crtc.c
209,16 → 209,6
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static const u32 vga_control_regs[6] =
{
AVIVO_D1VGA_CONTROL,
AVIVO_D2VGA_CONTROL,
EVERGREEN_D3VGA_CONTROL,
EVERGREEN_D4VGA_CONTROL,
EVERGREEN_D5VGA_CONTROL,
EVERGREEN_D6VGA_CONTROL,
};
 
static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
226,24 → 216,14
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
BLANK_CRTC_PS_ALLOCATION args;
u32 vga_control = 0;
 
memset(&args, 0, sizeof(args));
 
if (ASIC_IS_DCE8(rdev)) {
vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
}
 
args.ucCRTC = radeon_crtc->crtc_id;
args.ucBlanking = state;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
if (ASIC_IS_DCE8(rdev)) {
WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
}
}
 
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
{
270,6 → 250,8
switch (mode) {
case DRM_MODE_DPMS_ON:
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
287,10 → 269,10
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
}
/* adjust pm to dpms */
radeon_pm_compute_clocks(rdev);
}
 
static void
441,17 → 423,7
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
 
if (enable) {
/* Don't mess with SS if percentage is 0 or external ss.
* SS is already disabled previously, and disabling it
* again can cause display problems if the pll is already
* programmed.
*/
if (ss->percentage == 0)
return;
if (ss->type & ATOM_EXTERNAL_SS_MASK)
return;
} else {
if (!enable) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
rdev->mode_info.crtcs[i]->enabled &&
487,6 → 459,8
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
args.v3.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
506,6 → 480,8
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v2.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
527,7 → 503,8
args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
args.lvds_ss_2.ucEnable = enable;
} else {
if (enable == ATOM_DISABLE) {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
atombios_disable_ss(rdev, pll_id);
return;
}
557,8 → 534,7
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
u32 clock = mode->clock;
int bpc = radeon_crtc->bpc;
int bpc = radeon_get_monitor_bpc(connector);
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
 
/* reset the pll flags */
579,7 → 555,7
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
/* use frac fb div on RS780/RS880 */
if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
633,24 → 609,6
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
}
 
/* adjust pll for deep color modes */
if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
switch (bpc) {
case 8:
default:
break;
case 10:
clock = (clock * 5) / 4;
break;
case 12:
clock = (clock * 3) / 2;
break;
case 16:
clock = clock * 2;
break;
}
}
 
/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
* accordingly based on the encoder/transmitter to work around
* special hw requirements.
672,7 → 630,7
switch (crev) {
case 1:
case 2:
args.v1.usPixelClock = cpu_to_le16(clock / 10);
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
684,7 → 642,7
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
case 3:
args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
args.v3.sInput.ucEncodeMode = encoder_mode;
args.v3.sInput.ucDispPllConfig = 0;
698,6 → 656,10
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
/* deep color support */
args.v3.sInput.usPixelClock =
cpu_to_le16((mode->clock * bpc / 8) / 10);
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
781,7 → 743,7
* SetPixelClock provides the dividers
*/
args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
if (ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
if (ASIC_IS_DCE61(rdev))
args.v6.ucPpll = ATOM_EXT_PLL1;
else if (ASIC_IS_DCE6(rdev))
args.v6.ucPpll = ATOM_PPLL0;
877,7 → 839,6
args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC;
if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
switch (bpc) {
case 8:
default:
884,15 → 845,9
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
break;
case 10:
/* yes this is correct, the atom define is wrong */
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
break;
case 12:
/* yes this is correct, the atom define is wrong */
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
break;
}
}
args.v5.ucTransmitterID = encoder_id;
args.v5.ucEncoderMode = encoder_mode;
args.v5.ucPpll = pll_id;
906,7 → 861,6
args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
switch (bpc) {
case 8:
default:
913,16 → 867,15
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
break;
case 10:
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
break;
case 12:
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
break;
case 16:
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
break;
}
}
args.v6.ucTransmitterID = encoder_id;
args.v6.ucEncoderMode = encoder_mode;
args.v6.ucPpll = pll_id;
962,9 → 915,6
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
 
/* Assign mode clock for hdmi deep color max clock limit check */
radeon_connector->pixelclock_for_modeset = mode->clock;
radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
 
switch (encoder_mode) {
988,15 → 938,12
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
} else {
} else
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
}
/* disable spread spectrum on DCE3 DP */
radeon_crtc->ss_enabled = false;
}
break;
case ATOM_ENCODER_MODE_LVDS:
if (ASIC_IS_DCE4(rdev))
1046,17 → 993,10
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
u32 pll_clock = mode->clock;
u32 clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
 
/* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
if (ASIC_IS_DCE5(rdev) &&
(encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
(radeon_crtc->bpc > 8))
clock = radeon_crtc->adjusted_clock;
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
1091,7 → 1031,7
radeon_crtc->crtc_id, &radeon_crtc->ss);
 
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, clock,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
ref_div, fb_div, frac_fb_div, post_div,
radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
 
1099,17 → 1039,15
/* calculate ss amount and step size */
if (ASIC_IS_DCE4(rdev)) {
u32 step_size;
u32 amount = (((fb_div * 10) + frac_fb_div) *
(u32)radeon_crtc->ss.percentage) /
(100 * (u32)radeon_crtc->ss.percentage_divider);
u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
else
step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
radeon_crtc->ss.step = step_size;
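/* Note on units (gloss, not from the original source): ss.percentage is a
 * fixed-point value scaled by ss.percentage_divider (e.g. 25 with a divider
 * of 100 reads as 0.25%), so dividing by 100 * percentage_divider above
 * yields a plain fraction of the combined feedback divider
 * (fb_div * 10 + frac_fb_div). */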
}
1136,10 → 1074,9
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
 
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
1149,8 → 1086,8
target_fb = fb;
}
else {
radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
radeon_fb = to_radeon_framebuffer(crtc->fb);
target_fb = crtc->fb;
}
 
/* If atomic, assume fb object is pinned & idle & fenced and
1175,44 → 1112,24
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
 
switch (target_fb->pixel_format) {
case DRM_FORMAT_C8:
switch (target_fb->bits_per_pixel) {
case 8:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
case 15:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
case 16:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_RGB565:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case 24:
case 32:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
1219,78 → 1136,16
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
drm_get_format_name(target_fb->pixel_format));
DRM_ERROR("Unsupported screen depth %d\n",
target_fb->bits_per_pixel);
return -EINVAL;
}
 
if (tiling_flags & RADEON_TILING_MACRO) {
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
 
/* Set NUM_BANKS. */
if (rdev->family >= CHIP_TAHITI) {
unsigned index, num_banks;
 
if (rdev->family >= CHIP_BONAIRE) {
unsigned tileb, tile_split_bytes;
 
/* Calculate the macrotile mode index. */
tile_split_bytes = 64 << tile_split;
tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
tileb = min(tile_split_bytes, tileb);
 
for (index = 0; tileb > 64; index++)
tileb >>= 1;
 
if (index >= 16) {
DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
target_fb->bits_per_pixel, tile_split);
return -EINVAL;
}
 
num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
} else {
switch (target_fb->bits_per_pixel) {
case 8:
index = 10;
break;
case 16:
index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
break;
default:
case 32:
index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
break;
}
 
num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
}
 
fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
} else {
/* NI and older. */
if (rdev->family >= CHIP_CAYMAN)
if (rdev->family >= CHIP_TAHITI)
tmp = rdev->config.si.tile_config;
else if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
1307,33 → 1162,21
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
break;
}
}
 
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
 
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
if (rdev->family >= CHIP_BONAIRE) {
/* XXX need to know more about the surface tiling mode */
fb_format |= CIK_GRPH_MICRO_TILE_MODE(CIK_DISPLAY_MICRO_TILING);
}
} else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
if (rdev->family >= CHIP_BONAIRE) {
/* Read the pipe config from the 2D TILED SCANOUT mode.
* It should be the same for the other modes too, but not all
* modes set the pipe config field. */
u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
 
fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
} else if ((rdev->family == CHIP_TAHITI) ||
if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
else if ((rdev->family == CHIP_VERDE) ||
(rdev->family == CHIP_OLAND) ||
(rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
else if (rdev->family == CHIP_VERDE)
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
 
switch (radeon_crtc->crtc_id) {
1370,18 → 1213,6
WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
/*
* The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
* for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
* retain the full precision throughout the pipeline.
*/
WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset,
(bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
~EVERGREEN_LUT_10BIT_BYPASS_EN);
 
if (bypass_lut)
DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
 
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
1393,10 → 1224,6
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
if (rdev->family >= CHIP_BONAIRE)
WREG32(CIK_LB_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
target_fb->height);
else
WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
target_fb->height);
x &= ~3;
1414,10 → 1241,10
tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
/* set pageflip to happen anywhere in vblank interval */
WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
if (!atomic && fb && fb != crtc->primary->fb) {
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
1449,10 → 1276,9
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
 
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
1462,8 → 1288,8
target_fb = fb;
}
else {
radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
radeon_fb = to_radeon_framebuffer(crtc->fb);
target_fb = crtc->fb;
}
 
obj = radeon_fb->obj;
1487,30 → 1313,18
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
 
switch (target_fb->pixel_format) {
case DRM_FORMAT_C8:
switch (target_fb->bits_per_pixel) {
case 8:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
case 15:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444;
#ifdef __BIG_ENDIAN
fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
break;
case DRM_FORMAT_XRGB1555:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
#ifdef __BIG_ENDIAN
fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
break;
case DRM_FORMAT_RGB565:
case 16:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
1518,8 → 1332,8
fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case 24:
case 32:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
1527,20 → 1341,9
fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010;
#ifdef __BIG_ENDIAN
fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
#endif
/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
drm_get_format_name(target_fb->pixel_format));
DRM_ERROR("Unsupported screen depth %d\n",
target_fb->bits_per_pixel);
return -EINVAL;
}
 
1579,13 → 1382,6
if (rdev->family >= CHIP_R600)
WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
/* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */
WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset,
(bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0), ~AVIVO_LUT_10BIT_BYPASS_EN);
 
if (bypass_lut)
DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
 
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
1614,10 → 1410,10
tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
/* set pageflip to happen anywhere in vblank interval */
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
 
if (!atomic && fb && fb != crtc->primary->fb) {
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
1801,12 → 1597,6
*
* Asic specific PLL information
*
* DCE 8.x
* KB/KV
* - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
* CI
* - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
*
* DCE 6.1
* - PPLL2 is only available to UNIPHYA (both DP and non-DP)
* - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
1833,48 → 1623,7
u32 pll_in_use;
int pll;
 
if (ASIC_IS_DCE8(rdev)) {
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
if (rdev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
return ATOM_PPLL_INVALID;
else {
/* use the same PPLL for all DP monitors */
pll = radeon_get_shared_dp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
} else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
/* otherwise, pick one of the plls */
if ((rdev->family == CHIP_KAVERI) ||
(rdev->family == CHIP_KABINI) ||
(rdev->family == CHIP_MULLINS)) {
/* KB/KV/ML has PPLL1 and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
/* CI has PPLL0, PPLL1, and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL0)))
return ATOM_PPLL0;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
}
} else if (ASIC_IS_DCE61(rdev)) {
if (ASIC_IS_DCE61(rdev)) {
struct radeon_encoder_atom_dig *dig =
radeon_encoder->enc_priv;
 
1907,20 → 1656,6
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE41(rdev)) {
/* Don't share PLLs on DCE4.1 chips */
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
if (rdev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
return ATOM_PPLL_INVALID;
}
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:
2018,9 → 1753,6
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
is_tvcv = true;
 
if (!radeon_crtc->adjusted_clock)
return -EINVAL;
 
atombios_crtc_set_pll(crtc, adjusted_mode);
 
if (ASIC_IS_DCE4(rdev))
2039,9 → 1771,6
atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
/* update the hw version for dpm */
radeon_crtc->hw_mode = *adjusted_mode;
 
return 0;
}
 
2108,27 → 1837,6
int i;
 
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
struct radeon_framebuffer *radeon_fb;
struct radeon_bo *rbo;
 
radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r))
DRM_ERROR("failed to reserve rbo before unpin\n");
else {
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
}
/* disable the GRPH */
if (ASIC_IS_DCE4(rdev))
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
else if (ASIC_IS_AVIVO(rdev))
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
 
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_ENABLE);
 
2153,9 → 1861,7
break;
case ATOM_PPLL0:
/* disable the ppll */
if ((rdev->family == CHIP_ARUBA) ||
(rdev->family == CHIP_BONAIRE) ||
(rdev->family == CHIP_HAWAII))
if (ASIC_IS_DCE61(rdev))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
/drivers/video/drm/radeon/atombios_dp.c
44,41 → 44,6
};
 
/***** radeon AUX functions *****/
 
/* Atom needs data in little endian format,
* so swap as appropriate when copying data to
* or from atom. Note that atom operates on
* dw (32-bit dword) units.
*/
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
u32 *dst32, *src32;
int i;
 
memcpy(src_tmp, src, num_bytes);
src32 = (u32 *)src_tmp;
dst32 = (u32 *)dst_tmp;
if (to_le) {
for (i = 0; i < ((num_bytes + 3) / 4); i++)
dst32[i] = cpu_to_le32(src32[i]);
memcpy(dst, dst_tmp, num_bytes);
} else {
u8 dws = num_bytes & ~3;
for (i = 0; i < ((num_bytes + 3) / 4); i++)
dst32[i] = le32_to_cpu(src32[i]);
memcpy(dst, dst_tmp, dws);
if (num_bytes % 4) {
for (i = 0; i < (num_bytes % 4); i++)
dst[dws+i] = dst_tmp[dws+i];
}
}
#else
memcpy(dst, src, num_bytes);
#endif
}
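/* Usage sketch (illustrative): the helper swaps (num_bytes + 3) / 4 whole
 * dwords and copies exactly num_bytes back out. The two call patterns used
 * later in this file:
 *
 *   radeon_atom_copy_swap(base, send, send_bytes, true);        // to atom
 *   radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);  // from atom
 */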
 
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
95,18 → 60,15
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int recv_bytes;
int r = 0;
 
memset(&args, 0, sizeof(args));
 
mutex_lock(&chan->mutex);
 
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
 
radeon_atom_copy_swap(base, send, send_bytes, true);
memcpy(base, send, send_bytes);
 
args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
args.v1.lpAuxRequest = 0 + 4;
args.v1.lpDataOut = 16 + 4;
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
120,22 → 82,19
/* timeout */
if (args.v1.ucReplyStatus == 1) {
DRM_DEBUG_KMS("dp_aux_ch timeout\n");
r = -ETIMEDOUT;
goto done;
return -ETIMEDOUT;
}
 
/* flags not zero */
if (args.v1.ucReplyStatus == 2) {
DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
r = -EIO;
goto done;
return -EBUSY;
}
 
/* error */
if (args.v1.ucReplyStatus == 3) {
DRM_DEBUG_KMS("dp_aux_ch error\n");
r = -EIO;
goto done;
return -EIO;
}
 
recv_bytes = args.v1.ucDataOutLen;
143,91 → 102,189
recv_bytes = recv_size;
 
if (recv && recv_size)
radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
memcpy(recv, base + 16, recv_bytes);
 
r = recv_bytes;
done:
mutex_unlock(&chan->mutex);
return recv_bytes;
}
 
return r;
static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
u16 address, u8 *send, u8 send_bytes, u8 delay)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
int ret;
u8 msg[20];
int msg_bytes = send_bytes + 4;
u8 ack;
unsigned retry;
 
if (send_bytes > 16)
return -1;
 
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_WRITE << 4;
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
 
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return send_bytes;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else
return -EIO;
}
 
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
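/* HEADER_SIZE = BARE_ADDRESS_SIZE + 1: three address/command bytes plus one
 * length byte, matching the tx_buf[0..3] packing below (address low, address
 * high, request nibble, size). */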
return -EIO;
}
 
static ssize_t
radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
u16 address, u8 *recv, int recv_bytes, u8 delay)
{
struct radeon_i2c_chan *chan =
container_of(aux, struct radeon_i2c_chan, aux);
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[4];
int msg_bytes = 4;
u8 ack;
int ret;
u8 tx_buf[20];
size_t tx_size;
u8 ack, delay = 0;
unsigned retry;
 
if (WARN_ON(msg->size > 16))
return -E2BIG;
msg[0] = address;
msg[1] = address >> 8;
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
tx_buf[0] = msg->address & 0xff;
tx_buf[1] = msg->address >> 8;
tx_buf[2] = msg->request << 4;
tx_buf[3] = msg->size ? (msg->size - 1) : 0;
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else if (ret == 0)
return -EPROTO;
else
return -EIO;
}
 
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
/* tx_size needs to be 4 even for bare address packets since the atom
* table needs the info in tx_buf[3].
*/
tx_size = HEADER_SIZE + msg->size;
if (msg->size == 0)
tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
return -EIO;
}
 
static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
u16 reg, u8 val)
{
radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
}
 
static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
u16 reg)
{
u8 val = 0;
 
radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);
 
return val;
}
 
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
u16 address = algo_data->address;
u8 msg[5];
u8 reply[2];
unsigned retry;
int msg_bytes;
int reply_bytes = 1;
int ret;
u8 ack;
 
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[2] = AUX_I2C_READ << 4;
else
tx_buf[3] |= tx_size << 4;
memcpy(tx_buf + HEADER_SIZE, msg->buffer, msg->size);
ret = radeon_process_aux_ch(chan,
tx_buf, tx_size, NULL, 0, delay, &ack);
if (ret >= 0)
/* Return payload size. */
ret = msg->size;
msg[2] = AUX_I2C_WRITE << 4;
 
if (!(mode & MODE_I2C_STOP))
msg[2] |= AUX_I2C_MOT << 4;
 
msg[0] = address;
msg[1] = address >> 8;
 
switch (mode) {
case MODE_I2C_WRITE:
msg_bytes = 5;
msg[3] = msg_bytes << 4;
msg[4] = write_byte;
break;
case DP_AUX_NATIVE_READ:
case DP_AUX_I2C_READ:
/* tx_size needs to be 4 even for bare address packets since the atom
* table needs the info in tx_buf[3].
*/
tx_size = HEADER_SIZE;
if (msg->size == 0)
tx_buf[3] |= BARE_ADDRESS_SIZE << 4;
else
tx_buf[3] |= tx_size << 4;
ret = radeon_process_aux_ch(chan,
tx_buf, tx_size, msg->buffer, msg->size, delay, &ack);
case MODE_I2C_READ:
msg_bytes = 4;
msg[3] = msg_bytes << 4;
break;
default:
ret = -EINVAL;
msg_bytes = 4;
msg[3] = 3 << 4;
break;
}
 
if (ret >= 0)
msg->reply = ack >> 4;
 
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
 
void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
{
int ret;
switch (ack & AUX_NATIVE_REPLY_MASK) {
case AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
case AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
DRM_DEBUG_KMS("aux_ch native defer\n");
udelay(400);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
return -EREMOTEIO;
}
 
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer;
switch (ack & AUX_I2C_REPLY_MASK) {
case AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ)
*read_byte = reply[0];
return ret;
case AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(400);
break;
default:
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
return -EREMOTEIO;
}
}
 
ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux);
if (!ret)
radeon_connector->ddc_bus->has_aux = true;
 
WARN(ret, "drm_dp_aux_register() failed with error %d\n", ret);
DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
return -EREMOTEIO;
}
 
/***** general DP utility functions *****/
292,19 → 349,6
 
/***** radeon specific DP functions *****/
 
static int radeon_dp_get_max_link_rate(struct drm_connector *connector,
u8 dpcd[DP_DPCD_SIZE])
{
int max_link_rate;
 
if (radeon_connector_is_dp12_capable(connector))
max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
else
max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
 
return max_link_rate;
}
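/* Note: 540000 is the DP 1.2 HBR2 link rate (5.4 Gbps per lane) and 270000
 * is HBR (2.7 Gbps), so connectors that are not DP 1.2 capable are capped
 * at HBR. */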
 
/* First get the min lane count needed at the low link rate according to the
* pixel clock (prefer the low rate); second, check the max lane count
* supported by the DP panel; if the max lane count < the low-rate lane
* count, use the max lane count instead.
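/* A plausible shape of that selection (sketch only; the helper name
 * dp_get_max_dp_pix_clock() and its formula (link_rate * lane_num * 8) / bpp
 * are assumptions, not taken from this file):
 *
 *   for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1)
 *       if (pix_clock <= dp_get_max_dp_pix_clock(max_link_rate,
 *                                                lane_num, bpp))
 *           break;
 */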
314,7 → 358,7
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
int max_link_rate = drm_dp_max_link_rate(dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
352,7 → 396,7
return 540000;
}
 
return radeon_dp_get_max_link_rate(connector, dpcd);
return drm_dp_max_link_rate(dpcd);
}
 
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
375,11 → 419,12
 
u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
 
return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
radeon_connector->ddc_bus->rec.i2c_id, 0);
dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}
 
static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
390,11 → 435,11
if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
 
if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
}
403,19 → 448,17
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
int ret;
int ret, i;
 
char dpcd_hex_dump[DP_DPCD_SIZE * 3];
 
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
DP_DPCD_SIZE, 0);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
DRM_DEBUG_KMS("DPCD: ");
for (i = 0; i < DP_DPCD_SIZE; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
 
hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
 
radeon_dp_probe_oui(radeon_connector);
 
return true;
430,7 → 473,6
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
438,15 → 480,9
if (!ASIC_IS_DCE4(rdev))
return panel_mode;
 
if (!radeon_connector->con_priv)
return panel_mode;
 
dig_connector = radeon_connector->con_priv;
 
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
454,15 → 490,12
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
else
panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
}
} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
/* eDP */
if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
}
}
 
return panel_mode;
}
507,13 → 540,26
return MODE_OK;
}
 
static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
u8 link_status[DP_LINK_STATUS_SIZE])
{
int ret;
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE, 100);
if (ret <= 0) {
return false;
}
 
DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
return true;
}
 
bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
{
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status)
<= 0)
if (!radeon_dp_get_link_status(radeon_connector, link_status))
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
520,30 → 566,11
return true;
}
 
void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
 
if (!radeon_connector->con_priv)
return;
 
dig_connector = radeon_connector->con_priv;
 
/* power up/down the sink */
if (dig_connector->dpcd[0] >= 0x11) {
drm_dp_dpcd_writeb(&radeon_connector->ddc_bus->aux,
DP_SET_POWER, power_state);
usleep_range(1000, 2000);
}
}
 
 
struct radeon_dp_link_train_info {
struct radeon_device *rdev;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
int enc_id;
int dp_clock;
int dp_lane_count;
553,7 → 580,6
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
bool use_dpencoder;
struct drm_dp_aux *aux;
};
 
static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
564,8 → 590,8
0, dp_info->train_set[0]); /* sets all lanes at once */
 
/* set the vs/emph on the sink */
drm_dp_dpcd_write(dp_info->aux, DP_TRAINING_LANE0_SET,
dp_info->train_set, dp_info->dp_lane_count);
radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
dp_info->train_set, dp_info->dp_lane_count, 0);
}
 
static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
600,7 → 626,7
}
 
/* enable training pattern on the sink */
drm_dp_dpcd_writeb(dp_info->aux, DP_TRAINING_PATTERN_SET, tp);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
}
 
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
610,30 → 636,33
u8 tmp;
 
/* power up the sink */
radeon_dp_set_rx_power_state(dp_info->connector, DP_SET_POWER_D0);
if (dp_info->dpcd[0] >= 0x11)
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_SET_POWER, DP_SET_POWER_D0);
 
/* possibly enable downspread on the sink */
if (dp_info->dpcd[3] & 0x1)
drm_dp_dpcd_writeb(dp_info->aux,
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
else
drm_dp_dpcd_writeb(dp_info->aux,
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, 0);
 
if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
(dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
}
 
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
drm_dp_dpcd_writeb(dp_info->aux, DP_LANE_COUNT_SET, tmp);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
/* set the link rate on the sink */
tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
drm_dp_dpcd_writeb(dp_info->aux, DP_LINK_BW_SET, tmp);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
 
/* start training on the source */
if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
644,7 → 673,7
dp_info->dp_clock, dp_info->enc_id, 0);
 
/* disable the training pattern on the sink */
drm_dp_dpcd_writeb(dp_info->aux,
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
 
656,7 → 685,7
udelay(400);
 
/* disable the training pattern on the sink */
drm_dp_dpcd_writeb(dp_info->aux,
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_TRAINING_PATTERN_SET,
DP_TRAINING_PATTERN_DISABLE);
 
690,8 → 719,7
while (1) {
drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
 
if (drm_dp_dpcd_read_link_status(dp_info->aux,
dp_info->link_status) <= 0) {
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
753,8 → 781,7
while (1) {
drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
 
if (drm_dp_dpcd_read_link_status(dp_info->aux,
dp_info->link_status) <= 0) {
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
837,23 → 864,19
else
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
 
if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
== 1) {
tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
dp_info.tp3_supported = true;
else
dp_info.tp3_supported = false;
} else {
dp_info.tp3_supported = false;
}
 
memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
dp_info.rdev = rdev;
dp_info.encoder = encoder;
dp_info.connector = connector;
dp_info.radeon_connector = radeon_connector;
dp_info.dp_lane_count = dig_connector->dp_lane_count;
dp_info.dp_clock = dig_connector->dp_clock;
dp_info.aux = &radeon_connector->ddc_bus->aux;
 
if (radeon_dp_link_train_init(&dp_info))
goto done;
/drivers/video/drm/radeon/atombios_encoders.c
183,15 → 183,9
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
u8 backlight_level;
char bl_name[16];
 
/* Mac laptops with multiple GPUs use the gmux driver for backlight
* so don't register a backlight device
*/
if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
(rdev->pdev->device == 0x6741))
return;
 
if (!radeon_encoder->enc_priv)
return;
 
212,7 → 206,7
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
bd = backlight_device_register(bl_name, drm_connector->kdev,
bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_atom_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
221,17 → 215,12
 
pdata->encoder = radeon_encoder;
 
backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
 
dig = radeon_encoder->enc_priv;
dig->bl_dev = bd;
 
bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
/* Set a reasonable default here if the level is 0; otherwise
* fbdev will attempt to turn the backlight on after console
* unblanking and will try to restore 0, which turns the backlight
* off again.
*/
if (bd->props.brightness == 0)
bd->props.brightness = RADEON_MAX_BL_LEVEL;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
 
307,7 → 296,6
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
return true;
default:
return false;
331,10 → 319,12
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
/* get the native mode for scaling */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT|ATOM_DEVICE_DFP_SUPPORT)) {
/* get the native mode for LVDS */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT|ATOM_DEVICE_DFP_SUPPORT))
radeon_panel_mode_fixup(encoder, adjusted_mode);
} else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
 
/* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
if (tv_dac->tv_std == TV_STD_NTSC ||
344,8 → 334,6
else
radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
}
} else if (radeon_encoder->rmx_type != RMX_OFF) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
}
 
if (ASIC_IS_DCE3(rdev) &&
468,12 → 456,11
 
static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
{
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 8;
 
if (encoder->crtc) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
bpc = radeon_crtc->bpc;
}
if (connector)
bpc = radeon_get_monitor_bpc(connector);
 
switch (bpc) {
case 0:
492,11 → 479,11
}
}
 
 
union dvo_encoder_control {
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 dvo_v4;
};
 
void
546,13 → 533,6
args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.dvo_v3.ucDVOConfig = 0; /* XXX */
break;
case 4:
/* DCE8 */
args.dvo_v4.ucAction = action;
args.dvo_v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.dvo_v4.ucDVOConfig = 0; /* XXX */
args.dvo_v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
687,6 → 667,8
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
712,37 → 694,24
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (radeon_audio != 0) {
if (radeon_connector->use_digital &&
(radeon_connector->audio == RADEON_AUDIO_ENABLE))
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
radeon_audio &&
!ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
} else if (radeon_connector->use_digital) {
return ATOM_ENCODER_MODE_DVI;
} else {
return ATOM_ENCODER_MODE_CRT;
}
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
radeon_audio &&
!ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
} else {
return ATOM_ENCODER_MODE_DVI;
}
break;
case DRM_MODE_CONNECTOR_LVDS:
return ATOM_ENCODER_MODE_LVDS;
750,19 → 719,14
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
} else if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
radeon_audio &&
!ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
} else {
return ATOM_ENCODER_MODE_DVI;
}
break;
case DRM_MODE_CONNECTOR_eDP:
return ATOM_ENCODER_MODE_DP;
951,14 → 915,10
args.v4.ucLaneNum = 4;
 
if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
if (dp_clock == 540000)
if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
else if (dp_clock == 324000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ;
else if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
}
args.v4.acConfig.ucDigSel = dig->dig_encoder;
args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
1052,7 → 1012,6
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1312,13 → 1271,10
else
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYG;
break;
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
else if (radeon_encoder->pixel_clock > 165000)
args.v5.ucLaneNum = 8;
else
args.v5.ucLaneNum = 4;
1637,16 → 1593,10
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = NULL;
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
bool travis_quirk = false;
 
if (connector) {
radeon_connector = to_radeon_connector(connector);
radeon_dig_connector = radeon_connector->con_priv;
if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
ENCODER_OBJECT_ID_TRAVIS) &&
(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
!ASIC_IS_DCE5(rdev))
travis_quirk = true;
}
 
switch (mode) {
1667,13 → 1617,21
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
}
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
} else if (ASIC_IS_DCE4(rdev)) {
/* setup and enable the encoder */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
/* enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
} else {
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
/* some early dce3.2 boards have a bug in their transmitter control table */
if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
1681,50 → 1639,32
ATOM_TRANSMITTER_ACTION_POWER_ON);
radeon_dig_connector->edp_on = true;
}
}
/* enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
/* DP_SET_POWER_D0 is set in radeon_dp_link_train */
radeon_dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
if (ext_encoder)
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (ASIC_IS_DCE4(rdev)) {
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
}
if (ext_encoder)
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder,
ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
 
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) &&
connector && !travis_quirk)
radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
if (ASIC_IS_DCE4(rdev)) {
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder,
ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
} else if (ASIC_IS_DCE4(rdev)) {
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
} else {
/* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder,
ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (travis_quirk)
radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
1731,16 → 1671,52
radeon_dig_connector->edp_on = false;
}
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
}
 
static void
radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
struct drm_encoder *ext_encoder,
int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
 
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
break;
}
}
 
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
 
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
1759,7 → 1735,6
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
radeon_atom_encoder_dpms_dig(encoder, mode);
break;
1800,6 → 1775,9
return;
}
 
if (ext_encoder)
radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
 
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
}
1888,16 → 1866,12
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
else
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
} else {
} else
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
}
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = radeon_encoder->enc_priv;
switch (dig->dig_encoder) {
1919,9 → 1893,6
case 5:
args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
break;
case 6:
args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
break;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1984,14 → 1955,8
/* set scaler clears this on some chips */
if (ASIC_IS_AVIVO(rdev) &&
(!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
if (ASIC_IS_DCE8(rdev)) {
if (ASIC_IS_DCE4(rdev)) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset,
CIK_INTERLEAVE_EN);
else
WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
} else if (ASIC_IS_DCE4(rdev)) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
EVERGREEN_INTERLEAVE_EN);
else
2037,9 → 2002,6
else
return 4;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
return 6;
break;
}
} else if (ASIC_IS_DCE4(rdev)) {
/* DCE4/5 */
2124,7 → 2086,6
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
break;
2169,7 → 2130,6
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* handled in dpms */
break;
2391,15 → 2351,6
 
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
/* set up the FMT blocks */
if (ASIC_IS_DCE8(rdev))
dce8_program_fmt(encoder);
else if (ASIC_IS_DCE4(rdev))
dce4_program_fmt(encoder);
else if (ASIC_IS_DCE3(rdev))
dce3_program_fmt(encoder);
else if (ASIC_IS_AVIVO(rdev))
avivo_program_fmt(encoder);
}
 
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
2444,7 → 2395,6
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* handled in dpms */
break;
2676,7 → 2626,6
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
/drivers/video/drm/radeon/evergreen.c
33,8 → 33,10
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"
#include "radeon_ucode.h"
 
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
 
static const u32 crtc_offsets[6] =
{
EVERGREEN_CRTC0_REGISTER_OFFSET,
45,109 → 47,12
EVERGREEN_CRTC5_REGISTER_OFFSET
};
 
#include "clearstate_evergreen.h"
 
static const u32 sumo_rlc_save_restore_register_list[] =
{
0x98fc,
0x9830,
0x9834,
0x9838,
0x9870,
0x9874,
0x8a14,
0x8b24,
0x8bcc,
0x8b10,
0x8d00,
0x8d04,
0x8c00,
0x8c04,
0x8c08,
0x8c0c,
0x8d8c,
0x8c20,
0x8c24,
0x8c28,
0x8c18,
0x8c1c,
0x8cf0,
0x8e2c,
0x8e38,
0x8c30,
0x9508,
0x9688,
0x9608,
0x960c,
0x9610,
0x9614,
0x88c4,
0x88d4,
0xa008,
0x900c,
0x9100,
0x913c,
0x98f8,
0x98f4,
0x9b7c,
0x3f8c,
0x8950,
0x8954,
0x8a18,
0x8b28,
0x9144,
0x9148,
0x914c,
0x3f90,
0x3f94,
0x915c,
0x9160,
0x9178,
0x917c,
0x9180,
0x918c,
0x9190,
0x9194,
0x9198,
0x919c,
0x91a8,
0x91ac,
0x91b0,
0x91b4,
0x91b8,
0x91c4,
0x91c8,
0x91cc,
0x91d0,
0x91d4,
0x91e0,
0x91e4,
0x91ec,
0x91f0,
0x91f4,
0x9200,
0x9204,
0x929c,
0x9150,
0x802c,
};
 
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_program_aspm(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
extern void cayman_vm_decode_fault(struct radeon_device *rdev,
u32 status, u32 addr);
void cik_init_cp_pg_table(struct radeon_device *rdev);
 
extern u32 si_get_csb_size(struct radeon_device *rdev);
extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
extern u32 cik_get_csb_size(struct radeon_device *rdev);
extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
 
static const u32 evergreen_golden_registers[] =
{
0x3f90, 0xffff0000, 0xff000000,
189,7 → 94,7
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
0x5c4, 0xffffffff, 0x00000001,
0x5cc, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002,
0x913c, 0x0000000f, 0x0000000a
};
476,7 → 381,7
0x8c1c, 0xffffffff, 0x00001010,
0x28350, 0xffffffff, 0x00000000,
0xa008, 0xffffffff, 0x00010000,
0x5c4, 0xffffffff, 0x00000001,
0x5cc, 0xffffffff, 0x00000001,
0x9508, 0xffffffff, 0x00000002
};
 
635,7 → 540,7
static const u32 supersumo_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
0x5c4, 0xffffffff, 0x00000001,
0x5cc, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
719,7 → 624,7
static const u32 wrestler_golden_registers[] =
{
0x5eb4, 0xffffffff, 0x00000002,
0x5c4, 0xffffffff, 0x00000001,
0x5cc, 0xffffffff, 0x00000001,
0x7030, 0xffffffff, 0x00000011,
0x7c30, 0xffffffff, 0x00000011,
0x6104, 0x01000300, 0x00000000,
1175,74 → 1080,25
 
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
int readrq;
u16 v;
u16 ctl, v;
int err;
 
readrq = pcie_get_readrq(rdev->pdev);
v = ffs(readrq) - 8;
err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
if (err)
return;
 
v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
 
/* if the BIOS or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
* to avoid hangs or performance issues
*/
if ((v == 0) || (v == 6) || (v == 7))
pcie_set_readrq(rdev->pdev, 512);
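/* Note: pcie_get_readrq() returns the size in bytes, so ffs(readrq) - 8
 * recovers the 3-bit DEVCTL encoding (128 -> 0, 256 -> 1, ..., 4096 -> 5);
 * encodings 6 and 7 are reserved, and 512 bytes (encoding 2) is used as the
 * safe fallback. */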
if ((v == 0) || (v == 6) || (v == 7)) {
ctl &= ~PCI_EXP_DEVCTL_READRQ;
ctl |= (2 << 12);
pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
}
 
void dce4_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 0;
u32 tmp = 0;
enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
bpc = radeon_get_monitor_bpc(connector);
dither = radeon_connector->dither;
}
 
/* LVDS/eDP FMT is set up by atom */
if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
return;
 
/* not needed for analog */
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
return;
 
if (bpc == 0)
return;
 
switch (bpc) {
case 6:
if (dither == RADEON_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
FMT_SPATIAL_DITHER_EN);
else
tmp |= FMT_TRUNCATE_EN;
break;
case 8:
if (dither == RADEON_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
FMT_RGB_RANDOM_ENABLE |
FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
else
tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
break;
case 10:
default:
/* not needed */
break;
}
 
WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}
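
In short, dce4_program_fmt() only acts when pixel data has to be squeezed into a lower panel depth; a compact summary of the switch above:

/* Summary of the cases above:
*   bpc 6,  dither on : spatial dither (frame + highpass random sources)
*   bpc 6,  dither off: truncate to 6 bits
*   bpc 8,  dither on : spatial dither at 8-bit depth (adds RGB random)
*   bpc 8,  dither off: truncate at 8-bit depth
*   bpc 10+           : leave FMT_BIT_DEPTH_CONTROL at 0 (nothing to do)
* LVDS/eDP returns early because atom programs its FMT, and the analog
* DACs return early because dithering does not apply there. */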
 
static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
{
if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
1300,6 → 1156,7
}
}
 
 
/**
* evergreen_page_flip - pageflip callback.
*
1313,7 → 1170,7
* double buffered update to take place.
* Returns the current update pending status.
*/
void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
1345,23 → 1202,9
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
}
 
/**
* evergreen_page_flip_pending - check if page flip is still pending
*
* @rdev: radeon_device pointer
* @crtc_id: crtc to check
*
* Returns the current update pending status.
*/
bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 
/* Return current update_pending status: */
return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}
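
evergreen_page_flip() programs the new scanout base and evergreen_page_flip_pending() (the newer revision above) reports whether it has latched. A minimal sketch of how the pair composes; the helper wait_for_flip() is hypothetical and real callers wait on the pflip interrupt rather than spinning:

static void wait_for_flip(struct radeon_device *rdev, int crtc_id)
{
unsigned i;

for (i = 0; i < rdev->usec_timeout; i++) {
if (!evergreen_page_flip_pending(rdev, crtc_id))
break; /* new surface address has latched */
udelay(1);
}
}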
 
/* get temperature in millidegrees */
1545,8 → 1388,8
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
if (voltage->type == VOLTAGE_SW) {
/* 0xff0x are flags rather than an actual voltage */
if ((voltage->voltage & 0xff00) == 0xff00)
/* 0xff01 is a flag rather than an actual voltage */
if (voltage->voltage == 0xff01)
return;
if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
1566,8 → 1409,8
voltage = &rdev->pm.power_state[req_ps_idx].
clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
 
/* 0xff0x are flags rather than an actual voltage */
if ((voltage->vddci & 0xff00) == 0xff00)
/* 0xff01 is a flag rather than an actual voltage */
if (voltage->vddci == 0xff01)
return;
if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
1846,8 → 1689,7
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
u32 tmp, buffer_alloc, i;
u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
u32 tmp;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
1870,17 → 1712,12
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
if (other_mode) {
if (other_mode)
tmp = 0; /* 1/2 */
buffer_alloc = 1;
} else {
else
tmp = 2; /* whole */
buffer_alloc = 2;
}
} else {
} else
tmp = 0;
buffer_alloc = 0;
}
 
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
1887,17 → 1724,6
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
 
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
DMIF_BUFFERS_ALLOCATED(buffer_alloc));
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
DMIF_BUFFERS_ALLOCATED_COMPLETED)
break;
udelay(1);
}
}
 
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
2181,8 → 2007,7
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
struct evergreen_wm_params wm_low, wm_high;
u32 dram_channels;
struct evergreen_wm_params wm;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
2198,81 → 2023,39
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
dram_channels = evergreen_get_number_of_dram_channels(rdev);
 
/* watermark for high clocks */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
wm_high.yclk =
radeon_dpm_get_mclk(rdev, false) * 10;
wm_high.sclk =
radeon_dpm_get_sclk(rdev, false) * 10;
} else {
wm_high.yclk = rdev->pm.current_mclk * 10;
wm_high.sclk = rdev->pm.current_sclk * 10;
}
 
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
wm_high.active_time = mode->crtc_hdisplay * pixel_period;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
wm.yclk = rdev->pm.current_mclk * 10;
wm.sclk = rdev->pm.current_sclk * 10;
wm.disp_clk = mode->clock;
wm.src_width = mode->crtc_hdisplay;
wm.active_time = mode->crtc_hdisplay * pixel_period;
wm.blank_time = line_time - wm.active_time;
wm.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_high.interlaced = true;
wm_high.vsc = radeon_crtc->vsc;
wm_high.vtaps = 1;
wm.interlaced = true;
wm.vsc = radeon_crtc->vsc;
wm.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
wm_high.vtaps = 2;
wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm_high.lb_size = lb_size;
wm_high.dram_channels = dram_channels;
wm_high.num_heads = num_heads;
wm.vtaps = 2;
wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm.lb_size = lb_size;
wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
wm.num_heads = num_heads;
 
/* watermark for low clocks */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
wm_low.yclk =
radeon_dpm_get_mclk(rdev, true) * 10;
wm_low.sclk =
radeon_dpm_get_sclk(rdev, true) * 10;
} else {
wm_low.yclk = rdev->pm.current_mclk * 10;
wm_low.sclk = rdev->pm.current_sclk * 10;
}
 
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
wm_low.active_time = mode->crtc_hdisplay * pixel_period;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_low.interlaced = true;
wm_low.vsc = radeon_crtc->vsc;
wm_low.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
wm_low.vtaps = 2;
wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm_low.lb_size = lb_size;
wm_low.dram_channels = dram_channels;
wm_low.num_heads = num_heads;
 
/* set for high clocks */
latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535);
latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
/* set for low clocks */
latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535);
/* wm.yclk = low clk; wm.sclk = low clk */
latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
 
/* possibly force display priority to high */
/* should really do this at mode validation time... */
if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
!evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
!evergreen_check_latency_hiding(&wm_high) ||
if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
!evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
!evergreen_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority a to high\n");
DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
}
if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
!evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
!evergreen_check_latency_hiding(&wm_low) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority b to high\n");
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
 
2325,10 → 2108,6
WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
 
/* save values for DPM */
radeon_crtc->line_time = line_time;
radeon_crtc->wm_high = latency_watermark_a;
radeon_crtc->wm_low = latency_watermark_b;
}
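
A worked instance of the line timing feeding the watermark routine above, for a 1920x1080@60 mode; the pixel_period computation is not visible in this hunk, so this assumes the driver's usual pixel_period = 1000000 / mode->clock (ns per pixel, truncated):

/* mode->clock = 148500 kHz, mode->crtc_htotal = 2200:
*   pixel_period = 1000000 / 148500     = 6 ns
*   line_time    = min(2200 * 6, 65535) = 13200 ns
* active_time = crtc_hdisplay * pixel_period = 1920 * 6 = 11520 ns,
* leaving blank_time = 13200 - 11520 = 1680 ns, and both latency
* watermarks are clamped to the 16-bit register field (65535). */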
 
/**
2424,6 → 2203,7
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
2641,9 → 2421,8
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
if ((tmp & 0x7) != 3) {
tmp &= ~0x7;
tmp |= 0x3;
if ((tmp & 0x3) != 0) {
tmp &= ~0x3;
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
2676,7 → 2455,7
if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2869,7 → 2648,7
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
 
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
2912,7 → 2691,7
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
 
return 0;
}
2937,8 → 2716,8
RREG32(GRBM_SOFT_RESET);
 
/* Set ring buffer size */
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
2974,6 → 2753,8
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
ring->rptr = RREG32(CP_RB_RPTR);
 
evergreen_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
3163,7 → 2944,7
rdev->config.evergreen.sx_max_export_size = 256;
rdev->config.evergreen.sx_max_export_pos_size = 64;
rdev->config.evergreen.sx_max_export_smx_size = 192;
rdev->config.evergreen.max_hw_contexts = 4;
rdev->config.evergreen.max_hw_contexts = 8;
rdev->config.evergreen.sq_num_cf_insts = 2;
 
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
3310,8 → 3091,10
u32 efuse_straps_4;
u32 efuse_straps_3;
 
efuse_straps_4 = RREG32_RCU(0x204);
efuse_straps_3 = RREG32_RCU(0x203);
WREG32(RCU_IND_INDEX, 0x204);
efuse_straps_4 = RREG32(RCU_IND_DATA);
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
tmp = (((efuse_straps_4 & 0xf) << 4) |
((efuse_straps_3 & 0xf0000000) >> 28));
} else {
3337,18 → 3120,6
disabled_rb_mask &= ~(1 << i);
}
 
for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
u32 simd_disable_bitmap;
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
tmp <<= 16;
tmp |= simd_disable_bitmap;
}
rdev->config.evergreen.active_simds = hweight32(~tmp);
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
3679,7 → 3450,7
return true;
}
 
u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
3862,48 → 3633,6
evergreen_print_gpu_status_regs(rdev);
}
 
void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 tmp, i;
 
dev_info(rdev->dev, "GPU pci config reset\n");
 
/* disable dpm? */
 
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
udelay(50);
/* Disable DMA */
tmp = RREG32(DMA_RB_CNTL);
tmp &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL, tmp);
/* XXX other engines? */
 
/* halt the rlc */
r600_rlc_stop(rdev);
 
udelay(50);
 
/* set mclk/sclk to bypass */
rv770_set_clk_bypass_mode(rdev);
/* disable BM */
pci_clear_master(rdev->pdev);
/* disable mem access */
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
}
/* reset */
radeon_pci_config_reset(rdev);
/* wait for asic to come out of reset */
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
break;
udelay(1);
}
}
 
int evergreen_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;
3913,17 → 3642,10
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
 
/* try soft reset */
evergreen_gpu_soft_reset(rdev, reset_mask);
 
reset_mask = evergreen_gpu_check_soft_reset(rdev);
 
/* try pci config reset */
if (reset_mask && radeon_hard_reset)
evergreen_gpu_pci_config_reset(rdev);
 
reset_mask = evergreen_gpu_check_soft_reset(rdev);
 
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
 
3946,356 → 3668,36
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
radeon_ring_lockup_update(rdev, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
 
/*
* RLC
/**
* evergreen_dma_is_lockup - Check if the DMA engine is locked up
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
#define RLC_SAVE_RESTORE_LIST_END_MARKER 0x00000000
#define RLC_CLEAR_STATE_END_MARKER 0x00000001
 
void sumo_rlc_fini(struct radeon_device *rdev)
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
 
/* save restore block */
if (rdev->rlc.save_restore_obj) {
r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
if (unlikely(r != 0))
dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
radeon_bo_unpin(rdev->rlc.save_restore_obj);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
 
radeon_bo_unref(&rdev->rlc.save_restore_obj);
rdev->rlc.save_restore_obj = NULL;
if (!(reset_mask & RADEON_RESET_DMA)) {
radeon_ring_lockup_update(ring);
return false;
}
 
/* clear state block */
if (rdev->rlc.clear_state_obj) {
r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
if (unlikely(r != 0))
dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
radeon_bo_unpin(rdev->rlc.clear_state_obj);
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
 
radeon_bo_unref(&rdev->rlc.clear_state_obj);
rdev->rlc.clear_state_obj = NULL;
/* force ring activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
 
/* cp table block */
if (rdev->rlc.cp_table_obj) {
r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
if (unlikely(r != 0))
dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
radeon_bo_unpin(rdev->rlc.cp_table_obj);
radeon_bo_unreserve(rdev->rlc.cp_table_obj);
 
radeon_bo_unref(&rdev->rlc.cp_table_obj);
rdev->rlc.cp_table_obj = NULL;
}
}
 
#define CP_ME_TABLE_SIZE 96
 
int sumo_rlc_init(struct radeon_device *rdev)
{
const u32 *src_ptr;
volatile u32 *dst_ptr;
u32 dws, data, i, j, k, reg_num;
u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
int r;
 
src_ptr = rdev->rlc.reg_list;
dws = rdev->rlc.reg_list_size;
if (rdev->family >= CHIP_BONAIRE) {
dws += (5 * 16) + 48 + 48 + 64;
}
cs_data = rdev->rlc.cs_data;
 
if (src_ptr) {
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
&rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
}
}
 
r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
if (unlikely(r != 0)) {
sumo_rlc_fini(rdev);
return r;
}
r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->rlc.save_restore_gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
 
r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
if (r) {
dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
/* write the sr buffer */
dst_ptr = rdev->rlc.sr_ptr;
if (rdev->family >= CHIP_TAHITI) {
/* SI */
for (i = 0; i < rdev->rlc.reg_list_size; i++)
dst_ptr[i] = cpu_to_le32(src_ptr[i]);
} else {
/* ON/LN/TN */
/* format:
* dw0: (reg2 << 16) | reg1
* dw1: reg1 save space
* dw2: reg2 save space
*/
for (i = 0; i < dws; i++) {
data = src_ptr[i] >> 2;
i++;
if (i < dws)
data |= (src_ptr[i] >> 2) << 16;
j = (((i - 1) * 3) / 2);
dst_ptr[j] = cpu_to_le32(data);
}
j = ((i * 3) / 2);
dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
}
radeon_bo_kunmap(rdev->rlc.save_restore_obj);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
}
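
/* Illustrative trace of the pairing loop above (not driver code): for the
* first pair src_ptr[0], src_ptr[1]:
*   i = 0: data = src_ptr[0] >> 2; i -> 1; data |= (src_ptr[1] >> 2) << 16
*          j = ((1 - 1) * 3) / 2 = 0, so dw0 = (reg2 << 16) | reg1
* dw1 and dw2 stay free as the reg1/reg2 save space, and the next pair
* lands at j = ((3 - 1) * 3) / 2 = 3: one header dword plus two save
* dwords per pair, exactly matching the format comment above. */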
 
if (cs_data) {
/* clear state block */
if (rdev->family >= CHIP_BONAIRE) {
rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
} else if (rdev->family >= CHIP_TAHITI) {
rdev->rlc.clear_state_size = si_get_csb_size(rdev);
dws = rdev->rlc.clear_state_size + (256 / 4);
} else {
reg_list_num = 0;
dws = 0;
for (i = 0; cs_data[i].section != NULL; i++) {
for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
reg_list_num++;
dws += cs_data[i].section[j].reg_count;
}
}
reg_list_blk_index = (3 * reg_list_num + 2);
dws += reg_list_blk_index;
rdev->rlc.clear_state_size = dws;
}
 
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
&rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
}
r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
if (unlikely(r != 0)) {
sumo_rlc_fini(rdev);
return r;
}
r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->rlc.clear_state_gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
 
r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
if (r) {
dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
/* set up the cs buffer */
dst_ptr = rdev->rlc.cs_ptr;
if (rdev->family >= CHIP_BONAIRE) {
cik_get_csb_buffer(rdev, dst_ptr);
} else if (rdev->family >= CHIP_TAHITI) {
reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
} else {
reg_list_hdr_blk_index = 0;
reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
data = upper_32_bits(reg_list_mc_addr);
dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
for (i = 0; cs_data[i].section != NULL; i++) {
for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
reg_num = cs_data[i].section[j].reg_count;
data = reg_list_mc_addr & 0xffffffff;
dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
 
data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
 
data = 0x08000000 | (reg_num * 4);
dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
reg_list_hdr_blk_index++;
 
for (k = 0; k < reg_num; k++) {
data = cs_data[i].section[j].extent[k];
dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
}
reg_list_mc_addr += reg_num * 4;
reg_list_blk_index += reg_num;
}
}
dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
}
radeon_bo_kunmap(rdev->rlc.clear_state_obj);
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
}
 
if (rdev->rlc.cp_table_size) {
if (rdev->rlc.cp_table_obj == NULL) {
r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
&rdev->rlc.cp_table_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
}
 
r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
if (unlikely(r != 0)) {
dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->rlc.cp_table_gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->rlc.cp_table_obj);
dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
if (r) {
dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
 
cik_init_cp_pg_table(rdev);
 
radeon_bo_kunmap(rdev->rlc.cp_table_obj);
radeon_bo_unreserve(rdev->rlc.cp_table_obj);
 
}
 
return 0;
}
 
static void evergreen_rlc_start(struct radeon_device *rdev)
{
u32 mask = RLC_ENABLE;
 
if (rdev->flags & RADEON_IS_IGP) {
mask |= GFX_POWER_GATING_ENABLE | GFX_POWER_GATING_SRC;
}
 
WREG32(RLC_CNTL, mask);
}
 
int evergreen_rlc_resume(struct radeon_device *rdev)
{
u32 i;
const __be32 *fw_data;
 
if (!rdev->rlc_fw)
return -EINVAL;
 
r600_rlc_stop(rdev);
 
WREG32(RLC_HB_CNTL, 0);
 
if (rdev->flags & RADEON_IS_IGP) {
if (rdev->family == CHIP_ARUBA) {
u32 always_on_bitmap =
3 | (3 << (16 * rdev->config.cayman.max_shader_engines));
/* find out the number of active simds */
u32 tmp = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
tmp |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
tmp = hweight32(~tmp);
if (tmp == rdev->config.cayman.max_simds_per_se) {
WREG32(TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK, always_on_bitmap);
WREG32(TN_RLC_LB_PARAMS, 0x00601004);
WREG32(TN_RLC_LB_INIT_SIMD_MASK, 0xffffffff);
WREG32(TN_RLC_LB_CNTR_INIT, 0x00000000);
WREG32(TN_RLC_LB_CNTR_MAX, 0x00002000);
}
} else {
WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
}
WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
} else {
WREG32(RLC_HB_BASE, 0);
WREG32(RLC_HB_RPTR, 0);
WREG32(RLC_HB_WPTR, 0);
WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
}
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
 
fw_data = (const __be32 *)rdev->rlc_fw->data;
if (rdev->family >= CHIP_ARUBA) {
for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else if (rdev->family >= CHIP_CAYMAN) {
for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else {
for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
}
WREG32(RLC_UCODE_ADDR, 0);
 
evergreen_rlc_start(rdev);
 
return 0;
}
 
/* Interrupts */
 
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
4344,8 → 3746,8
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
/* only one DAC on DCE5 */
if (!ASIC_IS_DCE5(rdev))
/* only one DAC on DCE6 */
if (!ASIC_IS_DCE6(rdev))
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
 
4371,9 → 3773,9
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
u32 dma_cntl, dma_cntl1 = 0;
u32 thermal_int = 0;
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
4393,12 → 3795,6
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
if (rdev->family == CHIP_ARUBA)
thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
else
thermal_int = RREG32(CG_THERMAL_INT) &
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
 
afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
4444,11 → 3840,6
}
}
 
if (rdev->irq.dpm_thermal) {
DRM_DEBUG("dpm thermal\n");
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
}
 
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
4553,21 → 3944,15
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
 
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
}
if (rdev->num_crtc >= 6) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
}
 
WREG32(DC_HPD1_INT_CONTROL, hpd1);
4576,10 → 3961,6
WREG32(DC_HPD4_INT_CONTROL, hpd4);
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
if (rdev->family == CHIP_ARUBA)
WREG32(TN_CG_THERMAL_INT_CTRL, thermal_int);
else
WREG32(CG_THERMAL_INT, thermal_int);
 
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
4759,7 → 4140,6
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
4772,8 → 4152,6
u32 ring_index;
bool queue_hotplug = false;
bool queue_hdmi = false;
bool queue_thermal = false;
u32 status, addr;
 
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
4957,14 → 4335,6
break;
}
break;
case 8: /* D1 page flip */
case 10: /* D2 page flip */
case 12: /* D3 page flip */
case 14: /* D4 page flip */
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
5068,18 → 4438,13
break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
/* reset addr and status */
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
if (addr == 0x0 && status == 0x0)
break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
cayman_vm_decode_fault(rdev, status, addr);
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
/* reset addr and status */
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
5108,16 → 4473,6
DRM_DEBUG("IH: DMA trap\n");
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
break;
case 230: /* thermal low to high */
DRM_DEBUG("IH: thermal low to high\n");
rdev->pm.dpm.thermal.high_to_low = false;
queue_thermal = true;
break;
case 231: /* thermal high to low */
DRM_DEBUG("IH: thermal high to low\n");
rdev->pm.dpm.thermal.high_to_low = true;
queue_thermal = true;
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
5148,6 → 4503,143
return IRQ_HANDLED;
}
 
/**
* evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
*
* @rdev: radeon_device pointer
* @fence: radeon fence object
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
* an interrupt if needed (evergreen-SI).
*/
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* write the fence */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
radeon_ring_write(ring, fence->seq);
/* generate an interrupt */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
/* flush HDP */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
radeon_ring_write(ring, 1);
}
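
Counting the writes above gives the fence's exact ring footprint, which lines up with the 8-dword grouping the IB path below enforces:

/* Dword budget of evergreen_dma_fence_ring_emit(): 4 for the FENCE packet
* (header, addr lo, addr hi, seq) + 1 for the TRAP + 3 for the SRBM_WRITE
* HDP flush = 8 dwords, i.e. one full aligned 8-dword group on the ring. */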
 
/**
* evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
*
* @rdev: radeon_device pointer
* @ib: IB object to schedule
*
* Schedule an IB in the DMA ring (evergreen).
*/
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
 
if (rdev->wb.enabled) {
u32 next_rptr = ring->wptr + 4;
while ((next_rptr & 7) != 5)
next_rptr++;
next_rptr += 3;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
radeon_ring_write(ring, next_rptr);
}
 
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
* Pad as necessary with NOPs.
*/
while ((ring->wptr & 7) != 5)
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 
}
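
The padding rule above is pure modular arithmetic: the INDIRECT_BUFFER packet is 3 dwords, and 5 + 3 = 8, so padding until (wptr & 7) == 5 makes the packet end exactly on an 8-dword boundary. A standalone check (illustrative only):

#include <assert.h>

int main(void)
{
unsigned wptr;

for (wptr = 0; wptr < 64; wptr++) {	/* any starting write pointer */
unsigned p = wptr;

while ((p & 7) != 5)
p++;	/* each NOP advances one dword */
p += 3;	/* 3-dword INDIRECT_BUFFER packet */
assert((p & 7) == 0);	/* ends on the 8-dword boundary */
}
return 0;
}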
 
/**
* evergreen_copy_dma - copy pages using the DMA engine
*
* @rdev: radeon_device pointer
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
* @fence: radeon fence object
*
* Copy GPU pages using the DMA engine (evergreen-cayman).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
int evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
{
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
 
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
 
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
if (cur_size_in_dw > 0xFFFFF)
cur_size_in_dw = 0xFFFFF;
size_in_dw -= cur_size_in_dw;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
radeon_ring_write(ring, src_offset & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
src_offset += cur_size_in_dw * 4;
dst_offset += cur_size_in_dw * 4;
}
 
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring);
radeon_semaphore_free(rdev, &sem, *fence);
 
return r;
}
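
The ring reservation above (num_loops * 5 + 11) follows from each DMA_PACKET_COPY chunk costing 5 ring dwords and covering at most 0xFFFFF dwords of payload. A worked instance, assuming 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT == 12):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define GPU_PAGE_SHIFT	12	/* assumption: 4 KiB GPU pages */

int main(void)
{
unsigned long long pages = 16384;	/* a 64 MiB copy */
unsigned long long size_in_dw = (pages << GPU_PAGE_SHIFT) / 4;
unsigned long long loops = DIV_ROUND_UP(size_in_dw, 0xfffffULL);

/* 16777216 dwords -> 17 loops -> 17 * 5 + 11 = 96 ring dwords */
printf("size_in_dw=%llu loops=%llu ring=%llu\n",
size_in_dw, loops, loops * 5 + 11);
return 0;
}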
 
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
5155,24 → 4647,35
 
/* enable pcie gen2 link */
evergreen_pcie_gen2_enable(rdev);
/* enable aspm */
evergreen_program_aspm(rdev);
 
/* scratch needs to be initialized before MC */
r = r600_vram_scratch_init(rdev);
if (r)
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
 
evergreen_mc_program(rdev);
 
if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
}
}
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
} else {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
} else {
5182,18 → 4685,12
}
evergreen_gpu_init(rdev);
 
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
rdev->rlc.reg_list_size =
(u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
rdev->rlc.cs_data = evergreen_cs_data;
r = sumo_rlc_init(rdev);
r = evergreen_blit_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
// r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
5220,8 → 4717,8
// dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
// }
 
if (r)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
// if (r)
// rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
 
/* Enable IRQ */
if (!rdev->irq.installed) {
5240,13 → 4737,15
 
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_PACKET2);
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0));
DMA_RB_RPTR, DMA_RB_WPTR,
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
 
5260,7 → 4759,19
if (r)
return r;
 
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size,
R600_WB_UVD_RPTR_OFFSET,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (!r)
r = r600_uvd_init(rdev);
 
if (r)
DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
}
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
5272,7 → 4783,30
 
 
 
#if 0
 
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
{
int r;
 
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
if (r) {
if (rdev->r600_blit.vb_ib)
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
evergreen_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
#endif
 
/* Plan is to move initialization in that function and use
* helper function so that radeon_device_init pretty much
* do nothing more than calling asic specific function. This
5337,27 → 4871,6
if (r)
return r;
 
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
} else {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
}
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
5462,153 → 4975,3
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
 
void evergreen_program_aspm(struct radeon_device *rdev)
{
u32 data, orig;
u32 pcie_lc_cntl, pcie_lc_cntl_old;
bool disable_l0s, disable_l1 = false, disable_plloff_in_l1 = false;
/* fusion_platform = true
* if the system is a fusion system
* (APU or DGPU in a fusion system).
* todo: check if the system is a fusion platform.
*/
bool fusion_platform = false;
 
if (radeon_aspm == 0)
return;
 
if (!(rdev->flags & RADEON_IS_PCIE))
return;
 
switch (rdev->family) {
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_JUNIPER:
case CHIP_REDWOOD:
case CHIP_CEDAR:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_PALM:
case CHIP_ARUBA:
disable_l0s = true;
break;
default:
disable_l0s = false;
break;
}
 
if (rdev->flags & RADEON_IS_IGP)
fusion_platform = true; /* XXX also dGPUs in a fusion system */
 
data = orig = RREG32_PIF_PHY0(PB0_PIF_PAIRING);
if (fusion_platform)
data &= ~MULTI_PIF;
else
data |= MULTI_PIF;
if (data != orig)
WREG32_PIF_PHY0(PB0_PIF_PAIRING, data);
 
data = orig = RREG32_PIF_PHY1(PB1_PIF_PAIRING);
if (fusion_platform)
data &= ~MULTI_PIF;
else
data |= MULTI_PIF;
if (data != orig)
WREG32_PIF_PHY1(PB1_PIF_PAIRING, data);
 
pcie_lc_cntl = pcie_lc_cntl_old = RREG32_PCIE_PORT(PCIE_LC_CNTL);
pcie_lc_cntl &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
if (!disable_l0s) {
if (rdev->family >= CHIP_BARTS)
pcie_lc_cntl |= LC_L0S_INACTIVITY(7);
else
pcie_lc_cntl |= LC_L0S_INACTIVITY(3);
}
 
if (!disable_l1) {
if (rdev->family >= CHIP_BARTS)
pcie_lc_cntl |= LC_L1_INACTIVITY(7);
else
pcie_lc_cntl |= LC_L1_INACTIVITY(8);
 
if (!disable_plloff_in_l1) {
data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
if (data != orig)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
 
data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
if (data != orig)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
 
data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
if (data != orig)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
 
data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
if (data != orig)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
 
if (rdev->family >= CHIP_BARTS) {
data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
data &= ~PLL_RAMP_UP_TIME_0_MASK;
data |= PLL_RAMP_UP_TIME_0(4);
if (data != orig)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
 
data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
data &= ~PLL_RAMP_UP_TIME_1_MASK;
data |= PLL_RAMP_UP_TIME_1(4);
if (data != orig)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
 
data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
data &= ~PLL_RAMP_UP_TIME_0_MASK;
data |= PLL_RAMP_UP_TIME_0(4);
if (data != orig)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
 
data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
data &= ~PLL_RAMP_UP_TIME_1_MASK;
data |= PLL_RAMP_UP_TIME_1(4);
if (data != orig)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
}
 
data = orig = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
data &= ~LC_DYN_LANES_PWR_STATE_MASK;
data |= LC_DYN_LANES_PWR_STATE(3);
if (data != orig)
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
 
if (rdev->family >= CHIP_BARTS) {
data = orig = RREG32_PIF_PHY0(PB0_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
data |= LS2_EXIT_TIME(1);
if (data != orig)
WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
 
data = orig = RREG32_PIF_PHY1(PB1_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
data |= LS2_EXIT_TIME(1);
if (data != orig)
WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
}
}
}
 
/* evergreen parts only */
if (rdev->family < CHIP_BARTS)
pcie_lc_cntl |= LC_PMI_TO_L1_DIS;
 
if (pcie_lc_cntl != pcie_lc_cntl_old)
WREG32_PCIE_PORT(PCIE_LC_CNTL, pcie_lc_cntl);
}
/drivers/video/drm/radeon/evergreen_hdmi.c
32,12 → 32,6
#include "evergreend.h"
#include "atom.h"
 
extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode);
 
/*
* update the N and CTS parameters for a given pixel clock rate
*/
60,83 → 54,6
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
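
The N/CTS pairs written above satisfy the HDMI audio clock regeneration relation 128 * audio_rate = tmds_clock * N / CTS. A standalone check with the spec's standard 48 kHz pairing for a 148.5 MHz TMDS clock (N = 6144, CTS = 148500); illustrative only:

#include <assert.h>

int main(void)
{
unsigned long long tmds_hz = 148500000ULL;	/* 1080p60 TMDS clock */
unsigned long long n = 6144, cts = 148500;	/* standard 48 kHz pair */

assert(128ULL * 48000 == tmds_hz * n / cts);	/* both sides: 6144000 */
return 0;
}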
 
static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
u32 tmp = 0;
 
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
break;
}
}
 
if (!radeon_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
 
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (connector->latency_present[1])
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
AUDIO_LIPSYNC(connector->audio_latency[1]);
else
tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
} else {
if (connector->latency_present[0])
tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
AUDIO_LIPSYNC(connector->audio_latency[0]);
else
tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
}
WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
}
 
static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
u32 tmp;
u8 *sadb;
int sad_count;
 
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
break;
}
}
 
if (!radeon_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
 
sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
 
/* program the speaker allocation */
tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
/* set HDMI mode */
tmp |= HDMI_CONNECTION;
if (sad_count)
tmp |= SPEAKER_ALLOCATION(sadb[0]);
else
tmp |= SPEAKER_ALLOCATION(5); /* stereo */
WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
 
kfree(sadb);
}
 
static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
161,11 → 78,9
};
 
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
if (connector->encoder == encoder)
radeon_connector = to_radeon_connector(connector);
break;
}
}
 
if (!radeon_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
172,8 → 87,8
return;
}
 
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
if (sad_count < 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
181,8 → 96,6
 
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
u32 value = 0;
u8 stereo_freqs = 0;
int max_channels = -1;
int j;
 
for (j = 0; j < sad_count; j++) {
189,22 → 102,14
struct cea_sad *sad = &sads[j];
 
if (sad->format == eld_reg_to_type[i][1]) {
if (sad->channels > max_channels) {
value = MAX_CHANNELS(sad->channels) |
DESCRIPTOR_BYTE_2(sad->byte2) |
SUPPORTED_FREQUENCIES(sad->freq);
max_channels = sad->channels;
}
 
if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
stereo_freqs |= sad->freq;
else
value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
break;
}
}
 
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
 
WREG32(eld_reg_to_type[i][0], value);
}
 
223,8 → 128,15
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint8_t *frame = buffer + 3;
uint8_t *header = buffer;
 
/* Our header values (type, version, length) should be alright; Intel
* uses the same ones. The checksum function also seems to be OK, as it
* works fine for the audio infoframe. However, the calculated value is
* always lower by 2 than what fglrx produces, which breaks displaying
* anything on TVs that strictly check the checksum. Hack it manually
* here to work around the issue. */
frame[0x0] += 2;
 
WREG32(AFMT_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
WREG32(AFMT_AVI_INFO1 + offset,
232,58 → 144,9
WREG32(AFMT_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(AFMT_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
frame[0xC] | (frame[0xD] << 8));
}
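
The checksum byte patched above (frame[0x0], i.e. byte 3 of the infoframe, right after the type/version/length header) is defined by the HDMI spec so that all header and payload bytes sum to 0 mod 256. A minimal sketch of that rule; the helper is hypothetical, not the driver's routine:

#include <stdint.h>
#include <stddef.h>

static uint8_t infoframe_checksum(const uint8_t *buffer, size_t size)
{
uint8_t sum = 0;
size_t i;

for (i = 0; i < size; i++)	/* checksum slot itself is zeroed */
sum += buffer[i];
return (uint8_t)(0x100 - sum);	/* makes the byte total wrap to zero */
}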
 
static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
u32 base_rate = 24000;
u32 max_ratio = clock / base_rate;
u32 dto_phase;
u32 dto_modulo = clock;
u32 wallclock_ratio;
u32 dto_cntl;
 
if (!dig || !dig->afmt)
return;
 
if (ASIC_IS_DCE6(rdev)) {
dto_phase = 24 * 1000;
} else {
if (max_ratio >= 8) {
dto_phase = 192 * 1000;
wallclock_ratio = 3;
} else if (max_ratio >= 4) {
dto_phase = 96 * 1000;
wallclock_ratio = 2;
} else if (max_ratio >= 2) {
dto_phase = 48 * 1000;
wallclock_ratio = 1;
} else {
dto_phase = 24 * 1000;
wallclock_ratio = 0;
}
dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
}
 
/* XXX two dtos; generally use dto0 for hdmi */
/* Express [24MHz / target pixel clock] as an exact rational
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
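
For the DCE6 path above, phase/module = 24000/clock with both sides in kHz, so the programmed ratio equals 24 MHz / pixel-clock with no rounding at all. A standalone cross-multiplication check for a 148.5 MHz pixel clock (illustrative only):

#include <assert.h>

int main(void)
{
unsigned long long clock_khz = 148500;	/* target pixel clock */
unsigned long long phase = 24 * 1000;	/* DCCG_AUDIO_DTO0_PHASE */
unsigned long long module = clock_khz;	/* DCCG_AUDIO_DTO0_MODULE */

/* 24000/148500 == 24 MHz / 148.5 MHz exactly */
assert(phase * 148500000ULL == module * 24000000ULL);
return 0;
}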
 
 
/*
* update the info frames with the data from the current display mode
*/
293,72 → 156,34
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
uint32_t offset;
ssize_t err;
uint32_t val;
int bpc = 8;
 
if (!dig || !dig->afmt)
return;
 
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
offset = dig->afmt->offset;
 
/* hdmi deep color mode general control packets setup, if bpc > 8 */
if (encoder->crtc) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
bpc = radeon_crtc->bpc;
}
// r600_audio_set_clock(encoder, mode->clock);
 
/* disable audio prior to setting up hw */
if (ASIC_IS_DCE6(rdev)) {
dig->afmt->pin = dce6_audio_get_pin(rdev);
dce6_audio_enable(rdev, dig->afmt->pin, false);
} else {
dig->afmt->pin = r600_audio_get_pin(rdev);
r600_audio_enable(rdev, dig->afmt->pin, false);
}
 
evergreen_audio_set_dto(encoder, mode->clock);
 
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND); /* send null packets when required */
 
WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
 
val = RREG32(HDMI_CONTROL + offset);
val &= ~HDMI_DEEP_COLOR_ENABLE;
val &= ~HDMI_DEEP_COLOR_DEPTH_MASK;
WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
 
switch (bpc) {
case 0:
case 6:
case 8:
case 16:
default:
DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
connector->name, bpc);
break;
case 10:
val |= HDMI_DEEP_COLOR_ENABLE;
val |= HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR);
DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
connector->name);
break;
case 12:
val |= HDMI_DEEP_COLOR_ENABLE;
val |= HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR);
DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
connector->name);
break;
}
WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
 
WREG32(HDMI_CONTROL + offset, val);
WREG32(HDMI_ACR_PACKET_CONTROL + offset,
HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
HDMI_ACR_SOURCE); /* select SW CTS value */
 
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND | /* send null packets when required */
366,6 → 191,8
HDMI_GC_CONT); /* send general control packets every frame */
 
WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
HDMI_AVI_INFO_SEND | /* enable AVI info frames */
HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
 
373,63 → 200,11
AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
 
WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
 
WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
 
WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
 
WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
 
/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
 
if (bpc > 8)
WREG32(HDMI_ACR_PACKET_CONTROL + offset,
HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
else
WREG32(HDMI_ACR_PACKET_CONTROL + offset,
HDMI_ACR_SOURCE | /* select SW CTS value */
HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
 
evergreen_hdmi_update_ACR(encoder, mode->clock);
 
WREG32(AFMT_60958_0 + offset,
AFMT_60958_CS_CHANNEL_NUMBER_L(1));
 
WREG32(AFMT_60958_1 + offset,
AFMT_60958_CS_CHANNEL_NUMBER_R(2));
 
WREG32(AFMT_60958_2 + offset,
AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
AFMT_60958_CS_CHANNEL_NUMBER_7(8));
 
if (ASIC_IS_DCE6(rdev)) {
dce6_afmt_write_speaker_allocation(encoder);
} else {
dce4_afmt_write_speaker_allocation(encoder);
}
 
WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
AFMT_AUDIO_CHANNEL_ENABLE(0xff));
 
/* fglrx sets 0x40 in 0x5f80 here */
 
if (ASIC_IS_DCE6(rdev)) {
dce6_afmt_select_pin(encoder);
dce6_afmt_write_sad_regs(encoder);
dce6_afmt_write_latency_fields(encoder, mode);
} else {
evergreen_hdmi_write_sad_regs(encoder);
dce4_afmt_write_latency_fields(encoder, mode);
}
 
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
443,47 → 218,11
}
 
evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
evergreen_hdmi_update_ACR(encoder, mode->clock);
 
WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
HDMI_AVI_INFO_SEND | /* enable AVI info frames */
HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
 
WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
~HDMI_AVI_INFO_LINE_MASK);
 
WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */
 
/* it's unknown exactly what these bits do, but they're quite useful for debugging */
WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
 
/* enable audio after to setting up hw */
if (ASIC_IS_DCE6(rdev))
dce6_audio_enable(rdev, dig->afmt->pin, true);
else
r600_audio_enable(rdev, dig->afmt->pin, true);
}
 
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
if (!dig || !dig->afmt)
return;
 
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
if (!enable && !dig->afmt->enabled)
return;
 
dig->afmt->enabled = enable;
 
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
}
/drivers/video/drm/radeon/evergreen_reg.h
24,17 → 24,7
#ifndef __EVERGREEN_REG_H__
#define __EVERGREEN_REG_H__
 
/* trinity */
#define TN_SMC_IND_INDEX_0 0x200
#define TN_SMC_IND_DATA_0 0x204
 
/* evergreen */
#define EVERGREEN_PIF_PHY0_INDEX 0x8
#define EVERGREEN_PIF_PHY0_DATA 0xc
#define EVERGREEN_PIF_PHY1_INDEX 0x10
#define EVERGREEN_PIF_PHY1_DATA 0x14
#define EVERGREEN_MM_INDEX_HI 0x18
 
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
#define EVERGREEN_D3VGA_CONTROL 0x3e0
50,9 → 40,6
#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
 
#define EVERGREEN_CG_IND_ADDR 0x8f8
#define EVERGREEN_CG_IND_DATA 0x8fc
 
#define EVERGREEN_AUDIO_ENABLE 0x5e78
#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
 
116,8 → 103,6
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x6808
# define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
# define EVERGREEN_GRPH_ENDIAN_NONE 0
239,6 → 224,7
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
/drivers/video/drm/radeon/evergreend.h
48,297 → 48,6
#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
 
/* pm registers */
#define SMC_MSG 0x20c
#define HOST_SMC_MSG(x) ((x) << 0)
#define HOST_SMC_MSG_MASK (0xff << 0)
#define HOST_SMC_MSG_SHIFT 0
#define HOST_SMC_RESP(x) ((x) << 8)
#define HOST_SMC_RESP_MASK (0xff << 8)
#define HOST_SMC_RESP_SHIFT 8
#define SMC_HOST_MSG(x) ((x) << 16)
#define SMC_HOST_MSG_MASK (0xff << 16)
#define SMC_HOST_MSG_SHIFT 16
#define SMC_HOST_RESP(x) ((x) << 24)
#define SMC_HOST_RESP_MASK (0xff << 24)
#define SMC_HOST_RESP_SHIFT 24
 
#define DCCG_DISP_SLOW_SELECT_REG 0x4fc
#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
 
#define CG_SPLL_FUNC_CNTL 0x600
#define SPLL_RESET (1 << 0)
#define SPLL_SLEEP (1 << 1)
#define SPLL_BYPASS_EN (1 << 3)
#define SPLL_REF_DIV(x) ((x) << 4)
#define SPLL_REF_DIV_MASK (0x3f << 4)
#define SPLL_PDIV_A(x) ((x) << 20)
#define SPLL_PDIV_A_MASK (0x7f << 20)
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
#define SCLK_MUX_UPDATE (1 << 26)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
#define SPLL_DITHEN (1 << 28)
#define CG_SPLL_STATUS 0x60c
#define SPLL_CHG_STATUS (1 << 1)
 
#define MPLL_CNTL_MODE 0x61c
# define MPLL_MCLK_SEL (1 << 11)
# define SS_SSEN (1 << 24)
# define SS_DSMODE_EN (1 << 25)
 
#define MPLL_AD_FUNC_CNTL 0x624
#define CLKF(x) ((x) << 0)
#define CLKF_MASK (0x7f << 0)
#define CLKR(x) ((x) << 7)
#define CLKR_MASK (0x1f << 7)
#define CLKFRAC(x) ((x) << 12)
#define CLKFRAC_MASK (0x1f << 12)
#define YCLK_POST_DIV(x) ((x) << 17)
#define YCLK_POST_DIV_MASK (3 << 17)
#define IBIAS(x) ((x) << 20)
#define IBIAS_MASK (0x3ff << 20)
#define RESET (1 << 30)
#define PDNB (1 << 31)
#define MPLL_AD_FUNC_CNTL_2 0x628
#define BYPASS (1 << 19)
#define BIAS_GEN_PDNB (1 << 24)
#define RESET_EN (1 << 25)
#define VCO_MODE (1 << 29)
#define MPLL_DQ_FUNC_CNTL 0x62c
#define MPLL_DQ_FUNC_CNTL_2 0x630
 
#define GENERAL_PWRMGT 0x63c
# define GLOBAL_PWRMGT_EN (1 << 0)
# define STATIC_PM_EN (1 << 1)
# define THERMAL_PROTECTION_DIS (1 << 2)
# define THERMAL_PROTECTION_TYPE (1 << 3)
# define ENABLE_GEN2PCIE (1 << 4)
# define ENABLE_GEN2XSP (1 << 5)
# define SW_SMIO_INDEX(x) ((x) << 6)
# define SW_SMIO_INDEX_MASK (3 << 6)
# define SW_SMIO_INDEX_SHIFT 6
# define LOW_VOLT_D2_ACPI (1 << 8)
# define LOW_VOLT_D3_ACPI (1 << 9)
# define VOLT_PWRMGT_EN (1 << 10)
# define BACKBIAS_PAD_EN (1 << 18)
# define BACKBIAS_VALUE (1 << 19)
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
# define AC_DC_SW (1 << 24)
 
#define SCLK_PWRMGT_CNTL 0x644
# define SCLK_PWRMGT_OFF (1 << 0)
# define SCLK_LOW_D1 (1 << 1)
# define FIR_RESET (1 << 4)
# define FIR_FORCE_TREND_SEL (1 << 5)
# define FIR_TREND_MODE (1 << 6)
# define DYN_GFX_CLK_OFF_EN (1 << 7)
# define GFX_CLK_FORCE_ON (1 << 8)
# define GFX_CLK_REQUEST_OFF (1 << 9)
# define GFX_CLK_FORCE_OFF (1 << 10)
# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
# define DYN_LIGHT_SLEEP_EN (1 << 14)
#define MCLK_PWRMGT_CNTL 0x648
# define DLL_SPEED(x) ((x) << 0)
# define DLL_SPEED_MASK (0x1f << 0)
# define MPLL_PWRMGT_OFF (1 << 5)
# define DLL_READY (1 << 6)
# define MC_INT_CNTL (1 << 7)
# define MRDCKA0_PDNB (1 << 8)
# define MRDCKA1_PDNB (1 << 9)
# define MRDCKB0_PDNB (1 << 10)
# define MRDCKB1_PDNB (1 << 11)
# define MRDCKC0_PDNB (1 << 12)
# define MRDCKC1_PDNB (1 << 13)
# define MRDCKD0_PDNB (1 << 14)
# define MRDCKD1_PDNB (1 << 15)
# define MRDCKA0_RESET (1 << 16)
# define MRDCKA1_RESET (1 << 17)
# define MRDCKB0_RESET (1 << 18)
# define MRDCKB1_RESET (1 << 19)
# define MRDCKC0_RESET (1 << 20)
# define MRDCKC1_RESET (1 << 21)
# define MRDCKD0_RESET (1 << 22)
# define MRDCKD1_RESET (1 << 23)
# define DLL_READY_READ (1 << 24)
# define USE_DISPLAY_GAP (1 << 25)
# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
# define MPLL_TURNOFF_D2 (1 << 28)
#define DLL_CNTL 0x64c
# define MRDCKA0_BYPASS (1 << 24)
# define MRDCKA1_BYPASS (1 << 25)
# define MRDCKB0_BYPASS (1 << 26)
# define MRDCKB1_BYPASS (1 << 27)
# define MRDCKC0_BYPASS (1 << 28)
# define MRDCKC1_BYPASS (1 << 29)
# define MRDCKD0_BYPASS (1 << 30)
# define MRDCKD1_BYPASS (1 << 31)
 
#define CG_AT 0x6d4
# define CG_R(x) ((x) << 0)
# define CG_R_MASK (0xffff << 0)
# define CG_L(x) ((x) << 16)
# define CG_L_MASK (0xffff << 16)
 
#define CG_DISPLAY_GAP_CNTL 0x714
# define DISP1_GAP(x) ((x) << 0)
# define DISP1_GAP_MASK (3 << 0)
# define DISP2_GAP(x) ((x) << 2)
# define DISP2_GAP_MASK (3 << 2)
# define VBI_TIMER_COUNT(x) ((x) << 4)
# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
# define VBI_TIMER_UNIT(x) ((x) << 20)
# define VBI_TIMER_UNIT_MASK (7 << 20)
# define DISP1_GAP_MCHG(x) ((x) << 24)
# define DISP1_GAP_MCHG_MASK (3 << 24)
# define DISP2_GAP_MCHG(x) ((x) << 26)
# define DISP2_GAP_MCHG_MASK (3 << 26)
 
#define CG_BIF_REQ_AND_RSP 0x7f4
#define CG_CLIENT_REQ(x) ((x) << 0)
#define CG_CLIENT_REQ_MASK (0xff << 0)
#define CG_CLIENT_REQ_SHIFT 0
#define CG_CLIENT_RESP(x) ((x) << 8)
#define CG_CLIENT_RESP_MASK (0xff << 8)
#define CG_CLIENT_RESP_SHIFT 8
#define CLIENT_CG_REQ(x) ((x) << 16)
#define CLIENT_CG_REQ_MASK (0xff << 16)
#define CLIENT_CG_REQ_SHIFT 16
#define CLIENT_CG_RESP(x) ((x) << 24)
#define CLIENT_CG_RESP_MASK (0xff << 24)
#define CLIENT_CG_RESP_SHIFT 24
 
#define CG_SPLL_SPREAD_SPECTRUM 0x790
#define SSEN (1 << 0)
#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
 
#define MPLL_SS1 0x85c
#define CLKV(x) ((x) << 0)
#define CLKV_MASK (0x3ffffff << 0)
#define MPLL_SS2 0x860
#define CLKS(x) ((x) << 0)
#define CLKS_MASK (0xfff << 0)
 
#define CG_IND_ADDR 0x8f8
#define CG_IND_DATA 0x8fc
/* CGIND regs */
#define CG_CGTT_LOCAL_0 0x00
#define CG_CGTT_LOCAL_1 0x01
#define CG_CGTT_LOCAL_2 0x02
#define CG_CGTT_LOCAL_3 0x03
#define CG_CGLS_TILE_0 0x20
#define CG_CGLS_TILE_1 0x21
#define CG_CGLS_TILE_2 0x22
#define CG_CGLS_TILE_3 0x23
#define CG_CGLS_TILE_4 0x24
#define CG_CGLS_TILE_5 0x25
#define CG_CGLS_TILE_6 0x26
#define CG_CGLS_TILE_7 0x27
#define CG_CGLS_TILE_8 0x28
#define CG_CGLS_TILE_9 0x29
#define CG_CGLS_TILE_10 0x2a
#define CG_CGLS_TILE_11 0x2b
 
#define VM_L2_CG 0x15c0
 
#define MC_CONFIG 0x2000
 
#define MC_CONFIG_MCD 0x20a0
#define MC_CG_CONFIG_MCD 0x20a4
#define MC_RD_ENABLE_MCD(x) ((x) << 8)
#define MC_RD_ENABLE_MCD_MASK (7 << 8)
 
#define MC_HUB_MISC_HUB_CG 0x20b8
#define MC_HUB_MISC_VM_CG 0x20bc
#define MC_HUB_MISC_SIP_CG 0x20c0
 
#define MC_XPB_CLK_GAT 0x2478
 
#define MC_CG_CONFIG 0x25bc
#define MC_RD_ENABLE(x) ((x) << 4)
#define MC_RD_ENABLE_MASK (3 << 4)
 
#define MC_CITF_MISC_RD_CG 0x2648
#define MC_CITF_MISC_WR_CG 0x264c
#define MC_CITF_MISC_VM_CG 0x2650
# define MEM_LS_ENABLE (1 << 19)
 
#define MC_ARB_BURST_TIME 0x2808
#define STATE0(x) ((x) << 0)
#define STATE0_MASK (0x1f << 0)
#define STATE1(x) ((x) << 5)
#define STATE1_MASK (0x1f << 5)
#define STATE2(x) ((x) << 10)
#define STATE2_MASK (0x1f << 10)
#define STATE3(x) ((x) << 15)
#define STATE3_MASK (0x1f << 15)
 
#define MC_SEQ_RAS_TIMING 0x28a0
#define MC_SEQ_CAS_TIMING 0x28a4
#define MC_SEQ_MISC_TIMING 0x28a8
#define MC_SEQ_MISC_TIMING2 0x28ac
 
#define MC_SEQ_RD_CTL_D0 0x28b4
#define MC_SEQ_RD_CTL_D1 0x28b8
#define MC_SEQ_WR_CTL_D0 0x28bc
#define MC_SEQ_WR_CTL_D1 0x28c0
 
#define MC_SEQ_STATUS_M 0x29f4
# define PMG_PWRSTATE (1 << 16)
 
#define MC_SEQ_MISC1 0x2a04
#define MC_SEQ_RESERVE_M 0x2a08
#define MC_PMG_CMD_EMRS 0x2a0c
 
#define MC_SEQ_MISC3 0x2a2c
 
#define MC_SEQ_MISC5 0x2a54
#define MC_SEQ_MISC6 0x2a58
 
#define MC_SEQ_MISC7 0x2a64
 
#define MC_SEQ_CG 0x2a68
#define CG_SEQ_REQ(x) ((x) << 0)
#define CG_SEQ_REQ_MASK (0xff << 0)
#define CG_SEQ_REQ_SHIFT 0
#define CG_SEQ_RESP(x) ((x) << 8)
#define CG_SEQ_RESP_MASK (0xff << 8)
#define CG_SEQ_RESP_SHIFT 8
#define SEQ_CG_REQ(x) ((x) << 16)
#define SEQ_CG_REQ_MASK (0xff << 16)
#define SEQ_CG_REQ_SHIFT 16
#define SEQ_CG_RESP(x) ((x) << 24)
#define SEQ_CG_RESP_MASK (0xff << 24)
#define SEQ_CG_RESP_SHIFT 24
#define MC_SEQ_RAS_TIMING_LP 0x2a6c
#define MC_SEQ_CAS_TIMING_LP 0x2a70
#define MC_SEQ_MISC_TIMING_LP 0x2a74
#define MC_SEQ_MISC_TIMING2_LP 0x2a78
#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
#define MC_SEQ_WR_CTL_D1_LP 0x2a80
#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
 
#define MC_PMG_CMD_MRS 0x2aac
 
#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
#define MC_SEQ_RD_CTL_D1_LP 0x2b20
 
#define MC_PMG_CMD_MRS1 0x2b44
#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
 
#define CGTS_SM_CTRL_REG 0x9150
 
/* Registers */
 
#define RCU_IND_INDEX 0x100
381,34 → 90,6
#define CG_VCLK_STATUS 0x61c
#define CG_SCRATCH1 0x820
 
#define RLC_CNTL 0x3f00
# define RLC_ENABLE (1 << 0)
# define GFX_POWER_GATING_ENABLE (1 << 7)
# define GFX_POWER_GATING_SRC (1 << 8)
# define DYN_PER_SIMD_PG_ENABLE (1 << 27)
# define LB_CNT_SPIM_ACTIVE (1 << 30)
# define LOAD_BALANCE_ENABLE (1 << 31)
 
#define RLC_HB_BASE 0x3f10
#define RLC_HB_CNTL 0x3f0c
#define RLC_HB_RPTR 0x3f20
#define RLC_HB_WPTR 0x3f1c
#define RLC_HB_WPTR_LSB_ADDR 0x3f14
#define RLC_HB_WPTR_MSB_ADDR 0x3f18
#define RLC_MC_CNTL 0x3f44
#define RLC_UCODE_CNTL 0x3f48
#define RLC_UCODE_ADDR 0x3f2c
#define RLC_UCODE_DATA 0x3f30
 
/* new for TN */
#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
#define TN_RLC_LB_CNTR_MAX 0x3f14
#define TN_RLC_LB_CNTR_INIT 0x3f18
#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
#define TN_RLC_LB_INIT_SIMD_MASK 0x3fe4
#define TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK 0x3fe8
#define TN_RLC_LB_PARAMS 0x3fec
 
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
501,9 → 182,6
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO0_LOAD 0x05b8
#define DCCG_AUDIO_DTO0_CNTL 0x05bc
# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
 
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
517,11 → 195,10
# define HDMI_ERROR_ACK (1 << 8)
# define HDMI_ERROR_MASK (1 << 9)
# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
# define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28)
# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28)
# define HDMI_24BIT_DEEP_COLOR 0
# define HDMI_30BIT_DEEP_COLOR 1
# define HDMI_36BIT_DEEP_COLOR 2
# define HDMI_DEEP_COLOR_DEPTH_MASK (3 << 28)
#define HDMI_STATUS 0x7034
# define HDMI_ACTIVE_AVMUTE (1 << 0)
# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
719,13 → 396,6
#define AFMT_GENERIC0_7 0x7138
 
/* DCE4/5 ELD audio interface */
#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x5f78
#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
#define SPEAKER_ALLOCATION_SHIFT 0
#define HDMI_CONNECTION (1 << 16)
#define DP_CONNECTION (1 << 17)
 
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
755,44 → 425,6
* bit6 = 192 kHz
*/
 
#define AZ_CHANNEL_COUNT_CONTROL 0x5fe4
# define HBR_CHANNEL_COUNT(x) (((x) & 0x7) << 0)
# define COMPRESSED_CHANNEL_COUNT(x) (((x) & 0x7) << 4)
/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
* 0 = use stream header
* 1-7 = channel count - 1
*/
#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC 0x5fe8
# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
* 0 = invalid
* x = legal delay value
* 255 = sync not supported
*/
#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR 0x5fec
# define HBR_CAPABLE (1 << 0) /* enabled by default */
 
#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
# define DISPLAY0_TYPE(x) (((x) & 0x3) << 0)
# define DISPLAY_TYPE_NONE 0
# define DISPLAY_TYPE_HDMI 1
# define DISPLAY_TYPE_DP 2
# define DISPLAY0_ID(x) (((x) & 0x3f) << 2)
# define DISPLAY1_TYPE(x) (((x) & 0x3) << 8)
# define DISPLAY1_ID(x) (((x) & 0x3f) << 10)
# define DISPLAY2_TYPE(x) (((x) & 0x3) << 16)
# define DISPLAY2_ID(x) (((x) & 0x3f) << 18)
# define DISPLAY3_TYPE(x) (((x) & 0x3) << 24)
# define DISPLAY3_ID(x) (((x) & 0x3f) << 26)
#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
# define DISPLAY4_TYPE(x) (((x) & 0x3) << 0)
# define DISPLAY4_ID(x) (((x) & 0x3f) << 2)
# define DISPLAY5_TYPE(x) (((x) & 0x3) << 8)
# define DISPLAY5_ID(x) (((x) & 0x3f) << 10)
#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER 0x5ffc
# define NUMBER_OF_DISPLAY_ID(x) (((x) & 0x7) << 0)
 
#define AZ_HOT_PLUG_CONTROL 0x5e78
# define AZ_FORCE_CODEC_WAKE (1 << 0)
# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
871,30 → 503,6
#define CG_THERMAL_CTRL 0x72c
#define TOFFSET_MASK 0x00003FE0
#define TOFFSET_SHIFT 5
#define DIG_THERM_DPM(x) ((x) << 14)
#define DIG_THERM_DPM_MASK 0x003FC000
#define DIG_THERM_DPM_SHIFT 14
 
#define CG_THERMAL_INT 0x734
#define DIG_THERM_INTH(x) ((x) << 8)
#define DIG_THERM_INTH_MASK 0x0000FF00
#define DIG_THERM_INTH_SHIFT 8
#define DIG_THERM_INTL(x) ((x) << 16)
#define DIG_THERM_INTL_MASK 0x00FF0000
#define DIG_THERM_INTL_SHIFT 16
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
 
#define TN_CG_THERMAL_INT_CTRL 0x738
#define TN_DIG_THERM_INTH(x) ((x) << 0)
#define TN_DIG_THERM_INTH_MASK 0x000000FF
#define TN_DIG_THERM_INTH_SHIFT 0
#define TN_DIG_THERM_INTL(x) ((x) << 8)
#define TN_DIG_THERM_INTL_MASK 0x0000FF00
#define TN_DIG_THERM_INTL_SHIFT 8
#define TN_THERM_INT_MASK_HIGH (1 << 24)
#define TN_THERM_INT_MASK_LOW (1 << 25)
 
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x07FF0000
902,7 → 510,6
#define CG_TS0_STATUS 0x760
#define TS0_ADC_DOUT_MASK 0x000003FF
#define TS0_ADC_DOUT_SHIFT 0
 
/* APU */
#define CG_THERMAL_STATUS 0x678
 
1203,10 → 810,6
# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
 
#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
 
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
1355,38 → 958,6
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
 
/* DCE4/5/6 FMT blocks */
#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
# define FMT_DYNAMIC_EXP_EN (1 << 0)
# define FMT_DYNAMIC_EXP_MODE (1 << 4)
/* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
#define FMT_CONTROL 0x6fb8
# define FMT_PIXEL_ENCODING (1 << 16)
/* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
#define FMT_BIT_DEPTH_CONTROL 0x6fc8
# define FMT_TRUNCATE_EN (1 << 0)
# define FMT_TRUNCATE_DEPTH (1 << 4)
# define FMT_SPATIAL_DITHER_EN (1 << 8)
# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
# define FMT_RGB_RANDOM_ENABLE (1 << 14)
# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
# define FMT_TEMPORAL_DITHER_EN (1 << 16)
# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
# define FMT_TEMPORAL_LEVEL (1 << 24)
# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
# define FMT_25FRC_SEL(x) ((x) << 26)
# define FMT_50FRC_SEL(x) ((x) << 28)
# define FMT_75FRC_SEL(x) ((x) << 30)
#define FMT_CLAMP_CONTROL 0x6fe4
# define FMT_CLAMP_DATA_EN (1 << 0)
# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
# define FMT_CLAMP_6BPC 0
# define FMT_CLAMP_8BPC 1
# define FMT_CLAMP_10BPC 2
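The FMT bit-depth block above is self-describing, but the packing is easier to see composed. A hedged illustration follows: only the FMT_* names come from this header, the helper itself is hypothetical, and the reading of a clear FMT_SPATIAL_DITHER_DEPTH bit as a 6bpc target is an assumption.
 
/* Hypothetical helper: compose an FMT_BIT_DEPTH_CONTROL value for 6bpc
 * spatial dithering with per-frame random noise, using only the bit
 * definitions listed above. Not driver code. */
static u32 fmt_spatial_dither_6bpc(void)
{
u32 tmp = 0;
 
tmp |= FMT_SPATIAL_DITHER_EN;       /* bit 8: enable spatial dithering */
tmp |= FMT_SPATIAL_DITHER_MODE(0);  /* dither mode 0 */
tmp |= FMT_FRAME_RANDOM_ENABLE;     /* bit 13: new noise pattern each frame */
/* FMT_SPATIAL_DITHER_DEPTH (bit 12) left clear, assumed to select 6bpc */
return tmp;
}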
 
/* ASYNC DMA */
#define DMA_RB_RPTR 0xd008
#define DMA_RB_WPTR 0xd00c
1421,48 → 992,7
#define DMA_PACKET_CONSTANT_FILL 0xd
#define DMA_PACKET_NOP 0xf
 
/* PIF PHY0 indirect regs */
#define PB0_PIF_CNTL 0x10
# define LS2_EXIT_TIME(x) ((x) << 17)
# define LS2_EXIT_TIME_MASK (0x7 << 17)
# define LS2_EXIT_TIME_SHIFT 17
#define PB0_PIF_PAIRING 0x11
# define MULTI_PIF (1 << 25)
#define PB0_PIF_PWRDOWN_0 0x12
# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
# define PLL_RAMP_UP_TIME_0_SHIFT 24
#define PB0_PIF_PWRDOWN_1 0x13
# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
# define PLL_RAMP_UP_TIME_1_SHIFT 24
/* PIF PHY1 indirect regs */
#define PB1_PIF_CNTL 0x10
#define PB1_PIF_PAIRING 0x11
#define PB1_PIF_PWRDOWN_0 0x12
#define PB1_PIF_PWRDOWN_1 0x13
/* PCIE PORT indirect regs */
#define PCIE_LC_CNTL 0xa0
# define LC_L0S_INACTIVITY(x) ((x) << 8)
# define LC_L0S_INACTIVITY_MASK (0xf << 8)
# define LC_L0S_INACTIVITY_SHIFT 8
# define LC_L1_INACTIVITY(x) ((x) << 12)
# define LC_L1_INACTIVITY_MASK (0xf << 12)
# define LC_L1_INACTIVITY_SHIFT 12
# define LC_PMI_TO_L1_DIS (1 << 16)
# define LC_ASPM_TO_L1_DIS (1 << 24)
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_SHIFT 0
1482,9 → 1012,6
# define LC_SHORT_RECONFIG_EN (1 << 11)
# define LC_UPCONFIGURE_SUPPORT (1 << 12)
# define LC_UPCONFIGURE_DIS (1 << 13)
# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
# define LC_DYN_LANES_PWR_STATE_SHIFT 21
#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
# define LC_GEN2_EN_STRAP (1 << 0)
# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
1493,9 → 1020,6
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
# define LC_CURRENT_DATA_RATE (1 << 11)
# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
1576,7 → 1100,7
* 6. COMMAND [29:22] | BYTE_COUNT [20:0]
*/
# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
/* 0 - DST_ADDR
/* 0 - SRC_ADDR
* 1 - GDS
*/
# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
1591,7 → 1115,7
# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
/* COMMAND */
# define PACKET3_CP_DMA_DIS_WC (1 << 21)
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
/* 0 - none
* 1 - 8 in 16
* 2 - 8 in 32
/drivers/video/drm/radeon/firmware/*.bin
Cannot display: files marked as a binary type (svn:mime-type = application/octet-stream).
Property changes, identical for each file below: Deleted: svn:mime-type (-application/octet-stream, no newline at end of property):
KAVERI_sdma.bin, HAWAII_mec.bin, MULLINS_me.bin, HAWAII_smc.bin, TAHITI_mc2.bin,
OLAND_mc2.bin, HAINAN_mc2.bin, HAWAII_mc.bin, BONAIRE_vce.bin, PITCAIRN_mc2.bin,
HAWAII_rlc.bin, KAVERI_pfp.bin, BONAIRE_mc2.bin, HAWAII_ce.bin, HAWAII_me.bin,
MULLINS_ce.bin, KAVERI_me.bin, KAVERI_ce.bin, HAWAII_mc2.bin, KAVERI_mec.bin,
MULLINS_pfp.bin, BONAIRE_smc.bin, HAWAII_pfp.bin, MULLINS_rlc.bin, VERDE_mc2.bin,
MULLINS_sdma.bin, KAVERI_rlc.bin, HAWAII_sdma.bin, MULLINS_mec.bin
/drivers/video/drm/radeon/fwblob.asm
14,318 → 14,684
 
align 16
 
macro CE_code [arg]
{
dd FIRMWARE_#arg#_CE
dd arg#_CE_START
dd (arg#_CE_END - arg#_CE_START)
}
___start_builtin_fw:
 
macro CE_firmware [arg]
{
forward
FIRMWARE_#arg#_CE db 'radeon/',`arg,'_ce.bin',0
forward
dd FIRMWARE_R100_CP
dd R100CP_START
dd (R100CP_END - R100CP_START)
 
align 16
arg#_CE_START:
file "firmware/"#`arg#"_ce.bin"
arg#_CE_END:
}
dd FIRMWARE_R200_CP
dd R200CP_START
dd (R200CP_END - R200CP_START)
 
macro CP_code [arg]
{
dd FIRMWARE_#arg#_CP
dd arg#_CP_START
dd (arg#_CP_END - arg#_CP_START)
}
dd FIRMWARE_R300_CP
dd R300CP_START
dd (R300CP_END - R300CP_START)
 
macro CP_firmware [arg]
dd FIRMWARE_R420_CP
dd R420CP_START
dd (R420CP_END - R420CP_START)
 
dd FIRMWARE_R520_CP
dd R520CP_START
dd (R520CP_END - R520CP_START)
 
dd FIRMWARE_RS600_CP
dd RS600CP_START
dd (RS600CP_END - RS600CP_START)
 
dd FIRMWARE_RS690_CP
dd RS690CP_START
dd (RS690CP_END - RS690CP_START)
 
 
 
dd FIRMWARE_R600_ME
dd R600ME_START
dd (R600ME_END - R600ME_START)
 
dd FIRMWARE_RS780_ME
dd RS780ME_START
dd (RS780ME_END - RS780ME_START)
 
dd FIRMWARE_RS780_PFP
dd RS780PFP_START
dd (RS780PFP_END - RS780PFP_START)
 
dd FIRMWARE_RV610_ME
dd RV610ME_START
dd (RV610ME_END - RV610ME_START)
 
dd FIRMWARE_RV620_ME
dd RV620ME_START
dd (RV620ME_END - RV620ME_START)
 
dd FIRMWARE_RV630_ME
dd RV630ME_START
dd (RV630ME_END - RV630ME_START)
 
dd FIRMWARE_RV635_ME
dd RV635ME_START
dd (RV635ME_END - RV635ME_START)
 
dd FIRMWARE_RV670_ME
dd RV670ME_START
dd (RV670ME_END - RV670ME_START)
 
dd FIRMWARE_RV710_ME
dd RV710ME_START
dd (RV710ME_END - RV710ME_START)
 
dd FIRMWARE_RV730_ME
dd RV730ME_START
dd (RV730ME_END - RV730ME_START)
 
dd FIRMWARE_RV770_ME
dd RV770ME_START
dd (RV770ME_END - RV770ME_START)
 
dd FIRMWARE_CYPRESS_ME
dd CYPRESSME_START
dd (CYPRESSME_END - CYPRESSME_START)
 
dd FIRMWARE_REDWOOD_ME
dd REDWOODME_START
dd (REDWOODME_END - REDWOODME_START)
 
dd FIRMWARE_CEDAR_ME
dd CEDARME_START
dd (CEDARME_END - CEDARME_START)
 
dd FIRMWARE_JUNIPER_ME
dd JUNIPERME_START
dd (JUNIPERME_END - JUNIPERME_START)
 
dd FIRMWARE_PALM_ME
dd PALMME_START
dd (PALMME_END - PALMME_START)
 
dd FIRMWARE_SUMO_ME
dd SUMOME_START
dd (SUMOME_END - SUMOME_START)
 
dd FIRMWARE_SUMO2_ME
dd SUMO2ME_START
dd (SUMO2ME_END - SUMO2ME_START)
 
 
macro NI_code [arg]
{
forward
FIRMWARE_#arg#_CP db 'radeon/',`arg,'_cp.bin',0
forward
dd FIRMWARE_#arg#_ME
dd arg#ME_START
dd (arg#ME_END - arg#ME_START)
 
align 16
arg#_CP_START:
file "firmware/"#`arg#"_cp.bin"
arg#_CP_END:
dd FIRMWARE_#arg#_PFP
dd arg#PFP_START
dd (arg#PFP_END - arg#PFP_START)
 
dd FIRMWARE_#arg#_MC
dd arg#MC_START
dd (arg#MC_END - arg#MC_START)
 
}
 
macro PFP_code [arg]
NI_code BARTS, TURKS, CAICOS, CAYMAN
 
dd FIRMWARE_RV610_PFP
dd RV610PFP_START
dd (RV610PFP_END - RV610PFP_START)
 
dd FIRMWARE_RV620_PFP
dd RV620PFP_START
dd (RV620PFP_END - RV620PFP_START)
 
dd FIRMWARE_RV630_PFP
dd RV630PFP_START
dd (RV630PFP_END - RV630PFP_START)
 
dd FIRMWARE_RV635_PFP
dd RV635PFP_START
dd (RV635PFP_END - RV635PFP_START)
 
dd FIRMWARE_RV670_PFP
dd RV670PFP_START
dd (RV670PFP_END - RV670PFP_START)
 
 
dd FIRMWARE_RV710_PFP
dd RV710PFP_START
dd (RV710PFP_END - RV710PFP_START)
 
dd FIRMWARE_RV730_PFP
dd RV730PFP_START
dd (RV730PFP_END - RV730PFP_START)
 
dd FIRMWARE_RV770_PFP
dd RV770PFP_START
dd (RV770PFP_END - RV770PFP_START)
 
dd FIRMWARE_CYPRESS_PFP
dd CYPRESSPFP_START
dd (CYPRESSPFP_END - CYPRESSPFP_START)
 
dd FIRMWARE_REDWOOD_PFP
dd REDWOODPFP_START
dd (REDWOODPFP_END - REDWOODPFP_START)
 
dd FIRMWARE_CEDAR_PFP
dd CEDARPFP_START
dd (CEDARPFP_END - CEDARPFP_START)
 
dd FIRMWARE_JUNIPER_PFP
dd JUNIPERPFP_START
dd (JUNIPERPFP_END - JUNIPERPFP_START)
 
dd FIRMWARE_PALM_PFP
dd PALMPFP_START
dd (PALMPFP_END - PALMPFP_START)
 
dd FIRMWARE_SUMO_PFP
dd SUMOPFP_START
dd (SUMOPFP_END - SUMOPFP_START)
 
dd FIRMWARE_SUMO2_PFP
dd SUMO2PFP_START
dd (SUMO2PFP_END - SUMO2PFP_START)
 
dd FIRMWARE_BARTS_PFP
dd BARTSPFP_START
dd (BARTSPFP_END - BARTSPFP_START)
 
 
dd FIRMWARE_R600_RLC
dd R600RLC_START
dd (R600RLC_END - R600RLC_START)
 
dd FIRMWARE_R700_RLC
dd R700RLC_START
dd (R700RLC_END - R700RLC_START)
 
dd FIRMWARE_CYPRESS_RLC
dd CYPRESSRLC_START
dd (CYPRESSRLC_END - CYPRESSRLC_START)
 
dd FIRMWARE_REDWOOD_RLC
dd REDWOODRLC_START
dd (REDWOODRLC_END - REDWOODRLC_START)
 
dd FIRMWARE_CEDAR_RLC
dd CEDARRLC_START
dd (CEDARRLC_END - CEDARRLC_START)
 
dd FIRMWARE_JUNIPER_RLC
dd JUNIPERRLC_START
dd (JUNIPERRLC_END - JUNIPERRLC_START)
 
dd FIRMWARE_BTC_RLC
dd BTCRLC_START
dd (BTCRLC_END - BTCRLC_START)
 
dd FIRMWARE_SUMO_RLC
dd SUMORLC_START
dd (SUMORLC_END - SUMORLC_START)
 
macro SI_code [arg]
{
dd FIRMWARE_#arg#_PFP
dd arg#_PFP_START
dd (arg#_PFP_END - arg#_PFP_START)
}
 
macro PFP_firmware [arg]
{
forward
FIRMWARE_#arg#_PFP db 'radeon/',`arg,'_pfp.bin',0
forward
dd FIRMWARE_#arg#_ME
dd arg#_ME_START
dd (arg#_ME_END - arg#_ME_START)
 
align 16
arg#_PFP_START:
file "firmware/"#`arg#"_pfp.bin"
arg#_PFP_END:
}
dd FIRMWARE_#arg#_CE
dd arg#_CE_START
dd (arg#_CE_END - arg#_CE_START)
 
macro MC_code [arg]
{
dd FIRMWARE_#arg#_MC
dd arg#_MC_START
dd (arg#_MC_END - arg#_MC_START)
 
dd FIRMWARE_#arg#_RLC
dd arg#_RLC_START
dd (arg#_RLC_END - arg#_RLC_START)
 
}
 
macro MC_firmware [arg]
SI_code TAHITI, PITCAIRN, VERDE, OLAND, HAINAN
 
___end_builtin_fw:
 
 
FIRMWARE_R100_CP db 'radeon/R100_cp.bin',0
FIRMWARE_R200_CP db 'radeon/R200_cp.bin',0
FIRMWARE_R300_CP db 'radeon/R300_cp.bin',0
FIRMWARE_R420_CP db 'radeon/R420_cp.bin',0
FIRMWARE_R520_CP db 'radeon/R520_cp.bin',0
 
FIRMWARE_RS600_CP db 'radeon/RS600_cp.bin',0
FIRMWARE_RS690_CP db 'radeon/RS690_cp.bin',0
 
FIRMWARE_RS780_ME db 'radeon/RS780_me.bin',0
 
FIRMWARE_R600_ME db 'radeon/R600_me.bin',0
FIRMWARE_RV610_ME db 'radeon/RV610_me.bin',0
FIRMWARE_RV620_ME db 'radeon/RV620_me.bin',0
FIRMWARE_RV630_ME db 'radeon/RV630_me.bin',0
FIRMWARE_RV635_ME db 'radeon/RV635_me.bin',0
FIRMWARE_RV670_ME db 'radeon/RV670_me.bin',0
FIRMWARE_RV710_ME db 'radeon/RV710_me.bin',0
FIRMWARE_RV730_ME db 'radeon/RV730_me.bin',0
FIRMWARE_RV770_ME db 'radeon/RV770_me.bin',0
 
FIRMWARE_CYPRESS_ME db 'radeon/CYPRESS_me.bin',0
FIRMWARE_REDWOOD_ME db 'radeon/REDWOOD_me.bin',0
FIRMWARE_CEDAR_ME db 'radeon/CEDAR_me.bin',0
FIRMWARE_JUNIPER_ME db 'radeon/JUNIPER_me.bin',0
FIRMWARE_PALM_ME db 'radeon/PALM_me.bin',0
FIRMWARE_SUMO_ME db 'radeon/SUMO_me.bin',0
FIRMWARE_SUMO2_ME db 'radeon/SUMO2_me.bin',0
 
FIRMWARE_BARTS_ME db 'radeon/BARTS_me.bin',0
FIRMWARE_TURKS_ME db 'radeon/TURKS_me.bin',0
FIRMWARE_CAICOS_ME db 'radeon/CAICOS_me.bin',0
FIRMWARE_CAYMAN_ME db 'radeon/CAYMAN_me.bin',0
 
 
FIRMWARE_RS780_PFP db 'radeon/RS780_pfp.bin',0
FIRMWARE_R600_PFP db 'radeon/R600_pfp.bin',0
FIRMWARE_RV610_PFP db 'radeon/RV610_pfp.bin',0
FIRMWARE_RV620_PFP db 'radeon/RV620_pfp.bin',0
FIRMWARE_RV630_PFP db 'radeon/RV630_pfp.bin',0
FIRMWARE_RV635_PFP db 'radeon/RV635_pfp.bin',0
FIRMWARE_RV670_PFP db 'radeon/RV670_pfp.bin',0
FIRMWARE_RV710_PFP db 'radeon/RV710_pfp.bin',0
FIRMWARE_RV730_PFP db 'radeon/RV730_pfp.bin',0
FIRMWARE_RV770_PFP db 'radeon/RV770_pfp.bin',0
 
FIRMWARE_CYPRESS_PFP db 'radeon/CYPRESS_pfp.bin',0
FIRMWARE_REDWOOD_PFP db 'radeon/REDWOOD_pfp.bin',0
FIRMWARE_CEDAR_PFP db 'radeon/CEDAR_pfp.bin',0
FIRMWARE_JUNIPER_PFP db 'radeon/JUNIPER_pfp.bin',0
FIRMWARE_PALM_PFP db 'radeon/PALM_pfp.bin',0
FIRMWARE_SUMO_PFP db 'radeon/SUMO_pfp.bin',0
FIRMWARE_SUMO2_PFP db 'radeon/SUMO2_pfp.bin',0
 
FIRMWARE_BARTS_PFP db 'radeon/BARTS_pfp.bin',0
FIRMWARE_TURKS_PFP db 'radeon/TURKS_pfp.bin',0
FIRMWARE_CAICOS_PFP db 'radeon/CAICOS_pfp.bin',0
FIRMWARE_CAYMAN_PFP db 'radeon/CAYMAN_pfp.bin',0
 
 
FIRMWARE_R600_RLC db 'radeon/R600_rlc.bin',0
FIRMWARE_R700_RLC db 'radeon/R700_rlc.bin',0
FIRMWARE_CYPRESS_RLC db 'radeon/CYPRESS_rlc.bin',0
FIRMWARE_REDWOOD_RLC db 'radeon/REDWOOD_rlc.bin',0
FIRMWARE_CEDAR_RLC db 'radeon/CEDAR_rlc.bin',0
FIRMWARE_JUNIPER_RLC db 'radeon/JUNIPER_rlc.bin',0
FIRMWARE_SUMO_RLC db 'radeon/SUMO_rlc.bin',0
FIRMWARE_BTC_RLC db 'radeon/BTC_rlc.bin',0
FIRMWARE_CAYMAN_RLC db 'radeon/CAYMAN_rlc.bin',0
 
 
FIRMWARE_BARTS_MC db 'radeon/BARTS_mc.bin',0
FIRMWARE_TURKS_MC db 'radeon/TURKS_mc.bin',0
FIRMWARE_CAICOS_MC db 'radeon/CAICOS_mc.bin',0
FIRMWARE_CAYMAN_MC db 'radeon/CAYMAN_mc.bin',0
 
macro SI_firmware [arg]
{
 
forward
 
FIRMWARE_#arg#_PFP db 'radeon/',`arg,'_pfp.bin',0
FIRMWARE_#arg#_ME db 'radeon/',`arg,'_me.bin',0
FIRMWARE_#arg#_CE db 'radeon/',`arg,'_ce.bin',0
FIRMWARE_#arg#_MC db 'radeon/',`arg,'_mc.bin',0
FIRMWARE_#arg#_RLC db 'radeon/',`arg,'_rlc.bin',0
 
forward
 
align 16
arg#_PFP_START:
file "firmware/"#`arg#"_pfp.bin"
arg#_PFP_END:
 
align 16
arg#_ME_START:
file "firmware/"#`arg#"_me.bin"
arg#_ME_END:
 
align 16
arg#_CE_START:
file "firmware/"#`arg#"_ce.bin"
arg#_CE_END:
 
align 16
arg#_MC_START:
file "firmware/"#`arg#"_mc.bin"
arg#_MC_END:
}
 
macro MC2_code [arg]
{
dd FIRMWARE_#arg#_MC2
dd arg#_MC2_START
dd (arg#_MC2_END - arg#_MC2_START)
align 16
arg#_RLC_START:
file "firmware/"#`arg#"_rlc.bin"
arg#_RLC_END:
 
}
 
macro MC2_firmware [arg]
{
SI_firmware TAHITI, PITCAIRN, VERDE, OLAND, HAINAN
 
forward
FIRMWARE_#arg#_MC2 db 'radeon/',`arg,'_mc2.bin',0
forward
align 16
R100CP_START:
file 'firmware/R100_cp.bin'
R100CP_END:
 
align 16
arg#_MC2_START:
file "firmware/"#`arg#"_mc2.bin"
arg#_MC2_END:
}
R200CP_START:
file 'firmware/R200_cp.bin'
R200CP_END:
 
macro ME_code [arg]
{
dd FIRMWARE_#arg#_ME
dd arg#_ME_START
dd (arg#_ME_END - arg#_ME_START)
}
align 16
R300CP_START:
file 'firmware/R300_cp.bin'
R300CP_END:
 
macro ME_firmware [arg]
{
align 16
R420CP_START:
file 'firmware/R420_cp.bin'
R420CP_END:
 
forward
FIRMWARE_#arg#_ME db 'radeon/',`arg,'_me.bin',0
forward
align 16
R520CP_START:
file 'firmware/R520_cp.bin'
R520CP_END:
 
align 16
arg#_ME_START:
file "firmware/"#`arg#"_me.bin"
arg#_ME_END:
}
RS600CP_START:
file 'firmware/RS600_cp.bin'
RS600CP_END:
 
macro MEC_code [arg]
{
dd FIRMWARE_#arg#_MEC
dd arg#_MEC_START
dd (arg#_MEC_END - arg#_MEC_START)
}
align 16
RS690CP_START:
file 'firmware/RS690_cp.bin'
RS690CP_END:
 
macro MEC_firmware [arg]
{
forward
FIRMWARE_#arg#_MEC db 'radeon/',`arg,'_mec.bin',0
forward
align 16
RS780ME_START:
file 'firmware/RS780_me.bin'
RS780ME_END:
 
align 16
arg#_MEC_START:
file "firmware/"#`arg#"_mec.bin"
arg#_MEC_END:
}
RS780PFP_START:
file 'firmware/RS780_pfp.bin'
RS780PFP_END:
 
macro RLC_code [arg]
{
dd FIRMWARE_#arg#_RLC
dd arg#_RLC_START
dd (arg#_RLC_END - arg#_RLC_START)
}
align 16
R600ME_START:
file 'firmware/R600_me.bin'
R600ME_END:
 
macro RLC_firmware [arg]
{
forward
FIRMWARE_#arg#_RLC db 'radeon/',`arg,'_rlc.bin',0
forward
align 16
RV610ME_START:
file 'firmware/RV610_me.bin'
RV610ME_END:
 
align 16
arg#_RLC_START:
file "firmware/"#`arg#"_rlc.bin"
arg#_RLC_END:
}
RV620ME_START:
file 'firmware/RV620_me.bin'
RV620ME_END:
 
macro SDMA_code [arg]
{
dd FIRMWARE_#arg#_SDMA
dd arg#_SDMA_START
dd (arg#_SDMA_END - arg#_SDMA_START)
}
align 16
RV630ME_START:
file 'firmware/RV630_me.bin'
RV630ME_END:
 
macro SDMA_firmware [arg]
{
forward
FIRMWARE_#arg#_SDMA db 'radeon/',`arg,'_sdma.bin',0
forward
align 16
RV635ME_START:
file 'firmware/RV635_me.bin'
RV635ME_END:
 
align 16
arg#_SDMA_START:
file "firmware/"#`arg#"_sdma.bin"
arg#_SDMA_END:
}
RV670ME_START:
file 'firmware/RV670_me.bin'
RV670ME_END:
 
macro SMC_code [arg]
{
dd FIRMWARE_#arg#_SMC
dd arg#_SMC_START
dd (arg#_SMC_END - arg#_SMC_START)
}
 
macro SMC_firmware [arg]
{
forward
FIRMWARE_#arg#_SMC db 'radeon/',`arg,'_smc.bin',0
forward
align 16
RV710ME_START:
file 'firmware/RV710_me.bin'
RV710ME_END:
 
align 16
arg#_SMC_START:
file "firmware/"#`arg#"_smc.bin"
arg#_SMC_END:
}
RV730ME_START:
file 'firmware/RV730_me.bin'
RV730ME_END:
 
macro UVD_code [arg]
{
dd FIRMWARE_#arg#_UVD
dd arg#_UVD_START
dd (arg#_UVD_END - arg#_UVD_START)
}
align 16
RV770ME_START:
file 'firmware/RV770_me.bin'
RV770ME_END:
 
macro UVD_firmware [arg]
{
forward
FIRMWARE_#arg#_UVD db 'radeon/',`arg,'_uvd.bin',0
forward
align 16
CYPRESSME_START:
file 'firmware/CYPRESS_me.bin'
CYPRESSME_END:
 
align 16
arg#_UVD_START:
file "firmware/"#`arg#"_uvd.bin"
arg#_UVD_END:
}
REDWOODME_START:
file 'firmware/REDWOOD_me.bin'
REDWOODME_END:
 
macro VCE_code [arg]
{
dd FIRMWARE_#arg#_VCE
dd arg#_VCE_START
dd (arg#_VCE_END - arg#_VCE_START)
}
align 16
CEDARME_START:
file 'firmware/CEDAR_me.bin'
CEDARME_END:
 
macro VCE_firmware [arg]
{
forward
FIRMWARE_#arg#_VCE db 'radeon/',`arg,'_vce.bin',0
forward
align 16
JUNIPERME_START:
file 'firmware/JUNIPER_me.bin'
JUNIPERME_END:
 
align 16
arg#_VCE_START:
file "firmware/"#`arg#"_vce.bin"
arg#_VCE_END:
}
PALMME_START:
file 'firmware/PALM_me.bin'
PALMME_END:
 
___start_builtin_fw:
align 16
SUMOME_START:
file 'firmware/SUMO_me.bin'
SUMOME_END:
 
CE_code BONAIRE, HAINAN, HAWAII, KABINI, KAVERI, MULLINS, OLAND,\
PITCAIRN, TAHITI, VERDE
align 16
SUMO2ME_START:
file 'firmware/SUMO2_me.bin'
SUMO2ME_END:
 
CP_code R100, R200, R300, R420, R520, RS600, RS690
align 16
BARTSME_START:
file 'firmware/BARTS_me.bin'
BARTSME_END:
 
MC_code BARTS, BONAIRE, CAICOS, CAYMAN, HAINAN,\
HAWAII, OLAND, PITCAIRN,\
TAHITI, TURKS, VERDE
align 16
TURKSME_START:
file 'firmware/TURKS_me.bin'
TURKSME_END:
 
MC2_code BONAIRE, HAINAN, HAWAII, OLAND, PITCAIRN,\
TAHITI, VERDE
align 16
CAICOSME_START:
file 'firmware/CAICOS_me.bin'
CAICOSME_END:
 
ME_code R600, RS780, RV610, RV620, RV630, RV635, RV670, RV710, RV730, RV770,\
ARUBA, BARTS, BONAIRE, CAICOS, CAYMAN, CEDAR, CYPRESS, HAINAN,\
HAWAII, JUNIPER, KABINI, KAVERI, MULLINS, OLAND, PALM, PITCAIRN,\
REDWOOD, SUMO, SUMO2, TAHITI, TURKS, VERDE
align 16
CAYMANME_START:
file 'firmware/CAYMAN_me.bin'
CAYMANME_END:
 
MEC_code BONAIRE, HAWAII, KABINI, KAVERI, MULLINS
 
PFP_code R600, RS780, RV610, RV620, RV630, RV635, RV670, RV710, RV730, RV770,\
ARUBA, BARTS, BONAIRE, CAICOS, CAYMAN, CEDAR, CYPRESS, HAINAN,\
HAWAII, JUNIPER, KABINI, KAVERI, MULLINS, OLAND, PALM, PITCAIRN,\
REDWOOD, SUMO, SUMO2, TAHITI, TURKS, VERDE
align 16
RV610PFP_START:
file 'firmware/RV610_pfp.bin'
RV610PFP_END:
 
RLC_code R600, R700,\
ARUBA, BONAIRE, BTC, CAYMAN, CEDAR, CYPRESS, HAINAN,\
HAWAII, JUNIPER, KABINI, KAVERI, MULLINS, OLAND, PITCAIRN,\
REDWOOD, SUMO, TAHITI, VERDE
 
SDMA_code BONAIRE, HAWAII, KABINI, KAVERI, MULLINS
align 16
RV620PFP_START:
file 'firmware/RV620_pfp.bin'
RV620PFP_END:
 
SMC_code RV710, RV730, RV740, RV770,\
BARTS, BONAIRE, CAICOS, CAYMAN, CEDAR, CYPRESS, HAINAN,\
HAWAII, JUNIPER, OLAND, PITCAIRN,\
REDWOOD, TAHITI, TURKS, VERDE
align 16
RV630PFP_START:
file 'firmware/RV630_pfp.bin'
RV630PFP_END:
 
UVD_code RV710, BONAIRE, CYPRESS, SUMO, TAHITI
 
VCE_code BONAIRE
align 16
RV635PFP_START:
file 'firmware/RV635_pfp.bin'
RV635PFP_END:
 
___end_builtin_fw:
align 16
RV670PFP_START:
file 'firmware/RV670_pfp.bin'
RV670PFP_END:
 
CE_firmware BONAIRE, HAINAN, HAWAII, KABINI, KAVERI, MULLINS, OLAND,\
PITCAIRN, TAHITI, VERDE
align 16
RV710PFP_START:
file 'firmware/RV710_pfp.bin'
RV710PFP_END:
 
CP_firmware R100, R200, R300, R420, R520, RS600, RS690
align 16
RV730PFP_START:
file 'firmware/RV730_pfp.bin'
RV730PFP_END:
 
MC_firmware BARTS, BONAIRE, CAICOS, CAYMAN, HAINAN,\
HAWAII, OLAND, PITCAIRN,\
TAHITI, TURKS, VERDE
 
MC2_firmware BONAIRE, HAINAN, HAWAII, OLAND, PITCAIRN,\
TAHITI, VERDE
align 16
RV770PFP_START:
file 'firmware/RV770_pfp.bin'
RV770PFP_END:
 
ME_firmware R600, RS780, RV610, RV620, RV630, RV635, RV670, RV710, RV730, RV770,\
ARUBA, BARTS, BONAIRE, CAICOS, CAYMAN, CEDAR, CYPRESS, HAINAN,\
HAWAII, JUNIPER, KABINI, KAVERI, MULLINS, OLAND, PALM, PITCAIRN,\
REDWOOD, SUMO, SUMO2, TAHITI, TURKS, VERDE
 
MEC_firmware BONAIRE, HAWAII, KABINI, KAVERI, MULLINS
align 16
CYPRESSPFP_START:
file 'firmware/CYPRESS_pfp.bin'
CYPRESSPFP_END:
 
PFP_firmware R600, RS780, RV610, RV620, RV630, RV635, RV670, RV710, RV730, RV770,\
ARUBA, BARTS, BONAIRE, CAICOS, CAYMAN, CEDAR, CYPRESS, HAINAN,\
HAWAII, JUNIPER, KABINI, KAVERI, MULLINS, OLAND, PALM, PITCAIRN,\
REDWOOD, SUMO, SUMO2, TAHITI, TURKS, VERDE
align 16
REDWOODPFP_START:
file 'firmware/REDWOOD_pfp.bin'
REDWOODPFP_END:
 
RLC_firmware R600, R700,\
ARUBA, BONAIRE, BTC, CAYMAN, CEDAR, CYPRESS, HAINAN,\
HAWAII, JUNIPER, KABINI, KAVERI, MULLINS, OLAND, PITCAIRN,\
REDWOOD, SUMO, TAHITI, VERDE
align 16
CEDARPFP_START:
file 'firmware/CEDAR_pfp.bin'
CEDARPFP_END:
 
SDMA_firmware BONAIRE, HAWAII, KABINI, KAVERI, MULLINS
align 16
JUNIPERPFP_START:
file 'firmware/JUNIPER_pfp.bin'
JUNIPERPFP_END:
 
SMC_firmware RV710, RV730, RV740, RV770,\
BARTS, BONAIRE, CAICOS, CAYMAN, CEDAR, CYPRESS, HAINAN,\
HAWAII, JUNIPER, OLAND, PITCAIRN,\
REDWOOD, TAHITI, TURKS, VERDE
align 16
PALMPFP_START:
file 'firmware/PALM_pfp.bin'
PALMPFP_END:
 
UVD_firmware RV710, BONAIRE, CYPRESS, SUMO, TAHITI
align 16
SUMOPFP_START:
file 'firmware/SUMO_pfp.bin'
SUMOPFP_END:
 
VCE_firmware BONAIRE
align 16
SUMO2PFP_START:
file 'firmware/SUMO2_pfp.bin'
SUMO2PFP_END:
 
align 16
BARTSPFP_START:
file 'firmware/BARTS_pfp.bin'
BARTSPFP_END:
 
align 16
TURKSPFP_START:
file 'firmware/TURKS_pfp.bin'
TURKSPFP_END:
 
align 16
CAICOSPFP_START:
file 'firmware/CAICOS_pfp.bin'
CAICOSPFP_END:
 
align 16
CAYMANPFP_START:
file 'firmware/CAYMAN_pfp.bin'
CAYMANPFP_END:
 
align 16
R600RLC_START:
file 'firmware/R600_rlc.bin'
R600RLC_END:
 
align 16
R700RLC_START:
file 'firmware/R700_rlc.bin'
R700RLC_END:
 
align 16
CYPRESSRLC_START:
file 'firmware/CYPRESS_rlc.bin'
CYPRESSRLC_END:
 
align 16
REDWOODRLC_START:
file 'firmware/REDWOOD_rlc.bin'
REDWOODRLC_END:
 
align 16
CEDARRLC_START:
file 'firmware/CEDAR_rlc.bin'
CEDARRLC_END:
 
align 16
JUNIPERRLC_START:
file 'firmware/JUNIPER_rlc.bin'
JUNIPERRLC_END:
 
align 16
SUMORLC_START:
file 'firmware/SUMO_rlc.bin'
SUMORLC_END:
 
align 16
BTCRLC_START:
file 'firmware/BTC_rlc.bin'
BTCRLC_END:
 
align 16
CAYMANRLC_START:
file 'firmware/CAYMAN_rlc.bin'
CAYMANRLC_END:
 
 
align 16
BARTSMC_START:
file 'firmware/BARTS_mc.bin'
BARTSMC_END:
 
align 16
TURKSMC_START:
file 'firmware/TURKS_mc.bin'
TURKSMC_END:
 
align 16
CAICOSMC_START:
file 'firmware/CAICOS_mc.bin'
CAICOSMC_END:
 
align 16
CAYMANMC_START:
file 'firmware/CAYMAN_mc.bin'
CAYMANMC_END:
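 
Each entry the macros above emit between ___start_builtin_fw and ___end_builtin_fw is three dwords: a pointer to the 'radeon/NAME_part.bin' string, the blob's 16-byte-aligned start, and its length. A sketch of the C-side view, assuming a builtin-firmware scheme like Linux's (the struct layout, the symbol spelling after C-to-asm underscore mangling, and the lookup helper are all assumptions; strcmp is taken as available):
 
/* Assumed C view of one three-dword table entry built by the macros above. */
struct builtin_fw {
const char *name;   /* e.g. "radeon/CAYMAN_me.bin" */
const void *data;   /* blob start, align 16 */
u32 size;           /* blob length in bytes */
};
 
/* C names presumed to mangle to the asm labels ___start/___end_builtin_fw. */
extern struct builtin_fw __start_builtin_fw[], __end_builtin_fw[];
 
/* Hypothetical lookup: linear scan of the table for a matching name. */
static const struct builtin_fw *builtin_fw_find(const char *name)
{
const struct builtin_fw *fw;
 
for (fw = __start_builtin_fw; fw != __end_builtin_fw; fw++)
if (!strcmp(name, fw->name))
return fw;
return NULL;
}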
 
 
/drivers/video/drm/radeon/ni.c
22,6 → 22,7
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
32,135 → 33,7
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_cayman.h"
 
static const u32 tn_rlc_save_restore_register_list[] =
{
0x98fc,
0x98f0,
0x9834,
0x9838,
0x9870,
0x9874,
0x8a14,
0x8b24,
0x8bcc,
0x8b10,
0x8c30,
0x8d00,
0x8d04,
0x8c00,
0x8c04,
0x8c10,
0x8c14,
0x8d8c,
0x8cf0,
0x8e38,
0x9508,
0x9688,
0x9608,
0x960c,
0x9610,
0x9614,
0x88c4,
0x8978,
0x88d4,
0x900c,
0x9100,
0x913c,
0x90e8,
0x9354,
0xa008,
0x98f8,
0x9148,
0x914c,
0x3f94,
0x98f4,
0x9b7c,
0x3f8c,
0x8950,
0x8954,
0x8a18,
0x8b28,
0x9144,
0x3f90,
0x915c,
0x9160,
0x9178,
0x917c,
0x9180,
0x918c,
0x9190,
0x9194,
0x9198,
0x919c,
0x91a8,
0x91ac,
0x91b0,
0x91b4,
0x91b8,
0x91c4,
0x91c8,
0x91cc,
0x91d0,
0x91d4,
0x91e0,
0x91e4,
0x91ec,
0x91f0,
0x91f4,
0x9200,
0x9204,
0x929c,
0x8030,
0x9150,
0x9a60,
0x920c,
0x9210,
0x9228,
0x922c,
0x9244,
0x9248,
0x91e8,
0x9294,
0x9208,
0x9224,
0x9240,
0x9220,
0x923c,
0x9258,
0x9744,
0xa200,
0xa204,
0xa208,
0xa20c,
0x8d58,
0x9030,
0x9034,
0x9038,
0x903c,
0x9040,
0x9654,
0x897c,
0xa210,
0xa214,
0x9868,
0xa02c,
0x9664,
0x9698,
0x949c,
0x8e10,
0x8e18,
0x8c50,
0x8c58,
0x8c60,
0x8c68,
0x89b4,
0x9830,
0x802c,
};
 
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
171,30 → 44,36
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);
 
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024
 
#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037
 
#define ARUBA_RLC_UCODE_SIZE 1536
 
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
683,15 → 562,22
 
int ni_init_microcode(struct radeon_device *rdev)
{
struct platform_device *pdev;
const char *chip_name;
const char *rlc_chip_name;
size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
size_t smc_req_size = 0;
char fw_name[30];
int err;
 
DRM_DEBUG("\n");
 
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
err = IS_ERR(pdev);
if (err) {
printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
return -EINVAL;
}
 
switch (rdev->family) {
case CHIP_BARTS:
chip_name = "BARTS";
700,7 → 586,6
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
mc_req_size = BTC_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
break;
case CHIP_TURKS:
chip_name = "TURKS";
709,7 → 594,6
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
mc_req_size = BTC_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
break;
case CHIP_CAICOS:
chip_name = "CAICOS";
718,7 → 602,6
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
mc_req_size = BTC_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
break;
case CHIP_CAYMAN:
chip_name = "CAYMAN";
727,7 → 610,6
me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
break;
case CHIP_ARUBA:
chip_name = "ARUBA";
744,7 → 626,7
DRM_INFO("Loading %s Microcode\n", chip_name);
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
756,7 → 638,7
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
767,7 → 649,7
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
780,7 → 662,7
/* no MC ucode on TN */
if (!(rdev->flags & RADEON_IS_IGP)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->mc_fw->size != mc_req_size) {
790,27 → 672,10
err = -EINVAL;
}
}
out:
platform_device_unregister(pdev);
 
if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
printk(KERN_ERR
"smc: error loading firmware \"%s\"\n",
fw_name);
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"ni_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
err = -EINVAL;
}
}
 
out:
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
"ni_cp: Failed to load firmware \"%s\"\n",
827,14 → 692,6
return err;
}
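 
The pfp/me/rlc/mc loads above all follow one pattern: request the blob, then reject it when its byte count differs from the size table. A hedged condensation of that pattern (the helper name is hypothetical; request_firmware is used with the same arguments as in the newer revision above):
 
/* Illustrative only: the request-then-validate pattern shared by the
 * firmware loads in ni_init_microcode(). */
static int ni_fetch_fw(struct radeon_device *rdev, const struct firmware **fw,
const char *fw_name, size_t req_size)
{
int err = request_firmware(fw, fw_name, rdev->dev);
 
if (err)
return err;
if ((*fw)->size != req_size) {
printk(KERN_ERR "ni_cp: Bogus length %zu in firmware \"%s\"\n",
(*fw)->size, fw_name);
return -EINVAL;
}
return 0;
}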
 
int tn_get_temp(struct radeon_device *rdev)
{
u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
int actual_temp = (temp / 8) - 49;
 
return actual_temp * 1000;
}
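 
tn_get_temp() reads an 11-bit field in 1/8 degree steps with a 49 degree offset and reports millidegrees. A standalone restatement with a worked value (the helper is illustrative; only the 0x7ff mask, the divide-by-8 step and the -49 offset come from the function above):
 
/* e.g. raw field 0x2a0 (= 672): 672 / 8 - 49 = 35 degC, reported as 35000. */
static int tn_decode_temp_mdegc(u32 raw)
{
int actual_temp = ((int)(raw & 0x7ff) / 8) - 49;
 
return actual_temp * 1000; /* millidegrees Celsius */
}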
 
/*
* Core functions
*/
896,10 → 753,6
(rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sx_max_export_size = 256;
rdev->config.cayman.sx_max_export_pos_size = 64;
rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9903) ||
(rdev->pdev->device == 0x9904) ||
(rdev->pdev->device == 0x990A) ||
910,10 → 763,6
(rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sx_max_export_size = 256;
rdev->config.cayman.sx_max_export_pos_size = 64;
rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9919) ||
(rdev->pdev->device == 0x9990) ||
(rdev->pdev->device == 0x9991) ||
924,17 → 773,9
(rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
rdev->config.cayman.max_hw_contexts = 4;
rdev->config.cayman.sx_max_export_size = 128;
rdev->config.cayman.sx_max_export_pos_size = 32;
rdev->config.cayman.sx_max_export_smx_size = 96;
} else {
rdev->config.cayman.max_simds_per_se = 2;
rdev->config.cayman.max_backends_per_se = 1;
rdev->config.cayman.max_hw_contexts = 4;
rdev->config.cayman.sx_max_export_size = 128;
rdev->config.cayman.sx_max_export_pos_size = 32;
rdev->config.cayman.sx_max_export_smx_size = 96;
}
rdev->config.cayman.max_texture_channel_caches = 2;
rdev->config.cayman.max_gprs = 256;
942,6 → 783,10
rdev->config.cayman.max_gs_threads = 32;
rdev->config.cayman.max_stack_entries = 512;
rdev->config.cayman.sx_num_of_sets = 8;
rdev->config.cayman.sx_max_export_size = 256;
rdev->config.cayman.sx_max_export_pos_size = 64;
rdev->config.cayman.sx_max_export_smx_size = 192;
rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sq_num_cf_insts = 2;
 
rdev->config.cayman.sc_prim_fifo_size = 0x40;
1057,18 → 902,6
disabled_rb_mask &= ~(1 << i);
}
 
for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
u32 simd_disable_bitmap;
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
tmp <<= 16;
tmp |= simd_disable_bitmap;
}
rdev->config.cayman.active_simds = hweight32(~tmp);
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
1194,17 → 1027,7
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
 
udelay(50);
 
/* set clockgating golden values on TN */
if (rdev->family == CHIP_ARUBA) {
tmp = RREG32_CG(CG_CGTT_LOCAL_0);
tmp &= ~0x00380000;
WREG32_CG(CG_CGTT_LOCAL_0, tmp);
tmp = RREG32_CG(CG_CGTT_LOCAL_1);
tmp &= ~0x0e000000;
WREG32_CG(CG_CGTT_LOCAL_1, tmp);
}
}
 
/*
* GART
1229,6 → 1052,7
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
1239,7 → 1063,6
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
1246,7 → 1069,6
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
BANK_SELECT(6) |
L2_CACHE_BIGK_FRAGMENT_SIZE(6));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1271,7 → 1093,7
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
rdev->vm_manager.saved_table_addr[i]);
rdev->gart.table_addr >> 12);
}
 
/* enable context1-7 */
1279,7 → 1101,6
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
1303,13 → 1124,6
 
static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
unsigned i;
 
for (i = 1; i < 8; ++i) {
rdev->vm_manager.saved_table_addr[i] = RREG32(
VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
}
 
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
1345,12 → 1159,13
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
PACKET3_SH_ACTION_ENA;
 
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
1357,7 → 1172,7
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
1366,8 → 1181,6
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
PACKET3_SH_ACTION_ENA;
 
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1392,19 → 1205,38
(ib->vm ? (ib->vm->id << 24) : 0));
 
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
radeon_ring_write(ring, 10); /* poll interval */
}
 
void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
 
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
 
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
 
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
}
 
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
1412,55 → 1244,6
}
}
 
u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
u32 rptr;
 
if (rdev->wb.enabled)
rptr = rdev->wb.wb[ring->rptr_offs/4];
else {
if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
rptr = RREG32(CP_RB0_RPTR);
else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
rptr = RREG32(CP_RB1_RPTR);
else
rptr = RREG32(CP_RB2_RPTR);
}
 
return rptr;
}
 
u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
u32 wptr;
 
if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
wptr = RREG32(CP_RB0_WPTR);
else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
wptr = RREG32(CP_RB1_WPTR);
else
wptr = RREG32(CP_RB2_WPTR);
 
return wptr;
}
 
void cayman_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
WREG32(CP_RB0_WPTR, ring->wptr);
(void)RREG32(CP_RB0_WPTR);
} else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
WREG32(CP_RB1_WPTR, ring->wptr);
(void)RREG32(CP_RB1_WPTR);
} else {
WREG32(CP_RB2_WPTR, ring->wptr);
(void)RREG32(CP_RB2_WPTR);
}
}
 
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
1505,7 → 1288,7
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
 
cayman_cp_enable(rdev, true);
 
1547,7 → 1330,7
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
 
/* XXX init other rings */
 
1582,16 → 1365,6
CP_RB1_BASE,
CP_RB2_BASE
};
static const unsigned cp_rb_rptr[] = {
CP_RB0_RPTR,
CP_RB1_RPTR,
CP_RB2_RPTR
};
static const unsigned cp_rb_wptr[] = {
CP_RB0_WPTR,
CP_RB1_WPTR,
CP_RB2_WPTR
};
struct radeon_ring *ring;
int i, r;
 
1625,8 → 1398,8
 
/* Set ring buffer size */
ring = &rdev->ring[ridx[i]];
rb_cntl = order_base_2(ring->ring_size / 8);
rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
rb_cntl = drm_order(ring->ring_size / 8);
rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
rb_cntl |= BUF_SWAP_32BIT;
#endif
1649,9 → 1422,9
ring = &rdev->ring[ridx[i]];
WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
 
ring->wptr = 0;
WREG32(cp_rb_rptr[i], 0);
WREG32(cp_rb_wptr[i], ring->wptr);
ring->rptr = ring->wptr = 0;
WREG32(ring->rptr_reg, ring->rptr);
WREG32(ring->wptr_reg, ring->wptr);
 
mdelay(1);
WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
1671,14 → 1444,190
return r;
}
 
if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
return 0;
}
 
/*
* DMA
* Starting with R600, the GPU has an asynchronous
* DMA engine. The programming model is very similar
* to the 3D engine (ring buffer, IBs, etc.), but the
* DMA controller has its own packet format that is
* different from the PM4 format used by the 3D engine.
* It supports copying data, writing embedded data,
* solid fills, and a number of other things. It also
* has support for tiling/detiling of buffers.
* Cayman and newer support two asynchronous DMA engines.
*/
/**
* cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
*
* @rdev: radeon_device pointer
* @ib: IB object to schedule
*
* Schedule an IB in the DMA ring (cayman-SI).
*/
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
 
if (rdev->wb.enabled) {
u32 next_rptr = ring->wptr + 4;
while ((next_rptr & 7) != 5)
next_rptr++;
next_rptr += 3;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
radeon_ring_write(ring, next_rptr);
}
 
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
* Pad as necessary with NOPs.
*/
while ((ring->wptr & 7) != 5)
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 
}
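 
/* Editorial sketch (hypothetical helper, not in the driver): the padding
* loop above leaves (wptr & 7) == 5 because the INDIRECT_BUFFER packet is
* 3 dwords, so it ends exactly on the required 8 DW boundary (5 + 3 == 8).
* An equivalent pad computation:
*/
static inline unsigned cayman_dma_ib_pad_dw(unsigned wptr)
{
return (5 - (wptr & 7)) & 7; /* NOP dwords needed before the IB packet */
}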
 
/**
* cayman_dma_stop - stop the async dma engines
*
* @rdev: radeon_device pointer
*
* Stop the async dma engines (cayman-SI).
*/
void cayman_dma_stop(struct radeon_device *rdev)
{
u32 rb_cntl;
 
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
/* dma0 */
rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
 
/* dma1 */
rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
 
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
 
/**
* cayman_dma_resume - setup and start the async dma engines
*
* @rdev: radeon_device pointer
*
* Set up the DMA ring buffers and enable them. (cayman-SI).
* Returns 0 for success, error for failure.
*/
int cayman_dma_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring;
u32 rb_cntl, dma_cntl, ib_cntl;
u32 rb_bufsz;
u32 reg_offset, wb_offset;
int i, r;
 
/* Reset dma */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
RREG32(SRBM_SOFT_RESET);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
 
for (i = 0; i < 2; i++) {
if (i == 0) {
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
reg_offset = DMA0_REGISTER_OFFSET;
wb_offset = R600_WB_DMA_RPTR_OFFSET;
} else {
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
reg_offset = DMA1_REGISTER_OFFSET;
wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
}
 
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
 
/* Set ring buffer size in dwords */
rb_bufsz = drm_order(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(DMA_RB_RPTR + reg_offset, 0);
WREG32(DMA_RB_WPTR + reg_offset, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
 
if (rdev->wb.enabled)
rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
 
WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
 
/* enable DMA IBs */
ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
 
dma_cntl = RREG32(DMA_CNTL + reg_offset);
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
WREG32(DMA_CNTL + reg_offset, dma_cntl);
 
ring->wptr = 0;
WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
 
ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
 
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
 
ring->ready = true;
 
r = radeon_ring_test(rdev, ring->idx, ring);
if (r) {
ring->ready = false;
return r;
}
}
 
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
return 0;
}
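 
/* Editorial worked example (not driver code): for a 64 KiB DMA ring,
* ring_size / 4 = 16384 dwords and drm_order(16384) = 14, so the size
* field programmed above is rb_cntl = 14 << 1 = 0x1c before the enable
* and writeback bits are OR'd in.
*/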
 
u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
/**
* cayman_dma_fini - tear down the async dma engines
*
* @rdev: radeon_device pointer
*
* Stop the async dma engines and free the rings (cayman-SI).
*/
void cayman_dma_fini(struct radeon_device *rdev)
{
cayman_dma_stop(rdev);
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}
 
static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
 
1900,9 → 1849,7
 
reset_mask = cayman_gpu_check_soft_reset(rdev);
 
if (reset_mask)
evergreen_gpu_pci_config_reset(rdev);
 
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
 
return 0;
1924,12 → 1871,42
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
radeon_ring_lockup_update(rdev, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
 
/**
* cayman_dma_is_lockup - Check if the DMA engine is locked up
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
u32 mask;
 
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
mask = RADEON_RESET_DMA;
else
mask = RADEON_RESET_DMA1;
 
if (!(reset_mask & mask)) {
radeon_ring_lockup_update(ring);
return false;
}
/* force ring activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
 
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1937,17 → 1914,24
 
/* enable pcie gen2 link */
evergreen_pcie_gen2_enable(rdev);
/* enable aspm */
evergreen_program_aspm(rdev);
 
/* scratch needs to be initialized before MC */
r = r600_vram_scratch_init(rdev);
if (r)
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = ni_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
} else {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
 
evergreen_mc_program(rdev);
 
if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
1955,18 → 1939,26
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
return r;
cayman_gpu_init(rdev);
 
r = evergreen_blit_init(rdev);
if (r) {
// r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
rdev->rlc.reg_list_size =
(u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
rdev->rlc.cs_data = cayman_cs_data;
r = sumo_rlc_init(rdev);
r = si_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
2034,19 → 2026,24
evergreen_irq_set(rdev);
 
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_PACKET2);
CP_RB0_RPTR, CP_RB0_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
 
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
 
2061,6 → 2058,17
if (r)
return r;
 
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size,
R600_WB_UVD_RPTR_OFFSET,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (!r)
r = r600_uvd_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
 
r = radeon_ib_pool_init(rdev);
if (r) {
2137,27 → 2145,6
if (r)
return r;
 
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = ni_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
} else {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
}
 
/* Initialize power management */
radeon_pm_init(rdev);
 
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
 
2226,167 → 2213,131
{
}
 
#define R600_ENTRY_VALID (1 << 0)
#define R600_PTE_SYSTEM (1 << 1)
#define R600_PTE_SNOOPED (1 << 2)
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
 
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
{
uint32_t r600_flags = 0;
r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
if (flags & RADEON_VM_PAGE_SYSTEM) {
r600_flags |= R600_PTE_SYSTEM;
r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
}
return r600_flags;
}
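 
/* Editorial worked example (not driver code): a VALID | READABLE |
* WRITEABLE vram mapping translates to R600_ENTRY_VALID |
* R600_PTE_READABLE | R600_PTE_WRITEABLE, i.e.
* (1 << 0) | (1 << 5) | (1 << 6) = 0x61; system pages additionally set
* R600_PTE_SYSTEM and, when requested, R600_PTE_SNOOPED.
*/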
 
/**
* cayman_vm_decode_fault - print human readable fault info
* cayman_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Print human readable fault information (cayman/TN).
* Update the page tables using the CP (cayman/TN).
*/
void cayman_vm_decode_fault(struct radeon_device *rdev,
u32 status, u32 addr)
void cayman_vm_set_page(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
char *block;
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
 
switch (mc_id) {
case 32:
case 16:
case 96:
case 80:
case 160:
case 144:
case 224:
case 208:
block = "CB";
break;
case 33:
case 17:
case 97:
case 81:
case 161:
case 145:
case 225:
case 209:
block = "CB_FMASK";
break;
case 34:
case 18:
case 98:
case 82:
case 162:
case 146:
case 226:
case 210:
block = "CB_CMASK";
break;
case 35:
case 19:
case 99:
case 83:
case 163:
case 147:
case 227:
case 211:
block = "CB_IMMED";
break;
case 36:
case 20:
case 100:
case 84:
case 164:
case 148:
case 228:
case 212:
block = "DB";
break;
case 37:
case 21:
case 101:
case 85:
case 165:
case 149:
case 229:
case 213:
block = "DB_HTILE";
break;
case 38:
case 22:
case 102:
case 86:
case 166:
case 150:
case 230:
case 214:
block = "SX";
break;
case 39:
case 23:
case 103:
case 87:
case 167:
case 151:
case 231:
case 215:
block = "DB_STEN";
break;
case 40:
case 24:
case 104:
case 88:
case 232:
case 216:
case 168:
case 152:
block = "TC_TFETCH";
break;
case 41:
case 25:
case 105:
case 89:
case 233:
case 217:
case 169:
case 153:
block = "TC_VFETCH";
break;
case 42:
case 26:
case 106:
case 90:
case 234:
case 218:
case 170:
case 154:
block = "VC";
break;
case 112:
block = "CP";
break;
case 113:
case 114:
block = "SH";
break;
case 115:
block = "VGT";
break;
case 178:
block = "IH";
break;
case 51:
block = "RLC";
break;
case 55:
block = "DMA";
break;
case 56:
block = "HDP";
break;
default:
block = "unknown";
break;
if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
while (count) {
ndw = 1 + count * 2;
if (ndw > 0x3FFF)
ndw = 0x3FFF;
 
ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 1; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & RADEON_VM_PAGE_VALID) {
value = addr;
} else {
value = 0;
}
addr += incr;
value |= r600_flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
if ((flags & RADEON_VM_PAGE_SYSTEM) ||
(count == 1)) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;
 
printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
protections, vmid, addr,
(status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
block, mc_id);
/* for non-physically contiguous pages (system) */
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & RADEON_VM_PAGE_VALID) {
value = addr;
} else {
value = 0;
}
addr += incr;
value |= r600_flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
} else {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;
 
if (flags & RADEON_VM_PAGE_VALID)
value = addr;
else
value = 0;
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
ib->ptr[ib->length_dw++] = r600_flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
pe += ndw * 4;
addr += (ndw / 2) * incr;
count -= ndw / 2;
}
}
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
}
 
/**
* cayman_vm_flush - vm flush using the CP
*
2417,3 → 2368,26
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
 
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
if (vm == NULL)
return;
 
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* flush hdp cache */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
radeon_ring_write(ring, 1);
 
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm->id);
}
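 
/* Editorial note (not driver code): DMA_PACKET_SRBM_WRITE programs a
* register from the DMA ring; the second dword packs a 0xf byte-enable in
* bits [19:16] and the dword-aligned register offset in the low bits, e.g.
* VM_INVALIDATE_REQUEST (0x1478) encodes as (0xf << 16) | (0x1478 >> 2).
*/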
 
/drivers/video/drm/radeon/nid.h
128,28 → 128,11
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
#define PROTECTIONS_MASK (0xf << 0)
#define PROTECTIONS_SHIFT 0
/* bit 0: range
* bit 2: pde0
* bit 3: valid
* bit 4: read
* bit 5: write
*/
#define MEMORY_CLIENT_ID_MASK (0xff << 12)
#define MEMORY_CLIENT_ID_SHIFT 12
#define MEMORY_CLIENT_RW_MASK (1 << 24)
#define MEMORY_CLIENT_RW_SHIFT 24
#define FAULT_VMID_MASK (0x7 << 25)
#define FAULT_VMID_SHIFT 25
#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
506,571 → 489,6
# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
 
/* TN SMU registers */
#define TN_CURRENT_GNB_TEMP 0x1F390
 
/* pm registers */
#define SMC_MSG 0x20c
#define HOST_SMC_MSG(x) ((x) << 0)
#define HOST_SMC_MSG_MASK (0xff << 0)
#define HOST_SMC_MSG_SHIFT 0
#define HOST_SMC_RESP(x) ((x) << 8)
#define HOST_SMC_RESP_MASK (0xff << 8)
#define HOST_SMC_RESP_SHIFT 8
#define SMC_HOST_MSG(x) ((x) << 16)
#define SMC_HOST_MSG_MASK (0xff << 16)
#define SMC_HOST_MSG_SHIFT 16
#define SMC_HOST_RESP(x) ((x) << 24)
#define SMC_HOST_RESP_MASK (0xff << 24)
#define SMC_HOST_RESP_SHIFT 24
 
#define CG_SPLL_FUNC_CNTL 0x600
#define SPLL_RESET (1 << 0)
#define SPLL_SLEEP (1 << 1)
#define SPLL_BYPASS_EN (1 << 3)
#define SPLL_REF_DIV(x) ((x) << 4)
#define SPLL_REF_DIV_MASK (0x3f << 4)
#define SPLL_PDIV_A(x) ((x) << 20)
#define SPLL_PDIV_A_MASK (0x7f << 20)
#define SPLL_PDIV_A_SHIFT 20
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
#define SPLL_FB_DIV_SHIFT 0
#define SPLL_DITHEN (1 << 28)
 
#define MPLL_CNTL_MODE 0x61c
# define SS_SSEN (1 << 24)
# define SS_DSMODE_EN (1 << 25)
 
#define MPLL_AD_FUNC_CNTL 0x624
#define CLKF(x) ((x) << 0)
#define CLKF_MASK (0x7f << 0)
#define CLKR(x) ((x) << 7)
#define CLKR_MASK (0x1f << 7)
#define CLKFRAC(x) ((x) << 12)
#define CLKFRAC_MASK (0x1f << 12)
#define YCLK_POST_DIV(x) ((x) << 17)
#define YCLK_POST_DIV_MASK (3 << 17)
#define IBIAS(x) ((x) << 20)
#define IBIAS_MASK (0x3ff << 20)
#define RESET (1 << 30)
#define PDNB (1 << 31)
#define MPLL_AD_FUNC_CNTL_2 0x628
#define BYPASS (1 << 19)
#define BIAS_GEN_PDNB (1 << 24)
#define RESET_EN (1 << 25)
#define VCO_MODE (1 << 29)
#define MPLL_DQ_FUNC_CNTL 0x62c
#define MPLL_DQ_FUNC_CNTL_2 0x630
 
#define GENERAL_PWRMGT 0x63c
# define GLOBAL_PWRMGT_EN (1 << 0)
# define STATIC_PM_EN (1 << 1)
# define THERMAL_PROTECTION_DIS (1 << 2)
# define THERMAL_PROTECTION_TYPE (1 << 3)
# define ENABLE_GEN2PCIE (1 << 4)
# define ENABLE_GEN2XSP (1 << 5)
# define SW_SMIO_INDEX(x) ((x) << 6)
# define SW_SMIO_INDEX_MASK (3 << 6)
# define SW_SMIO_INDEX_SHIFT 6
# define LOW_VOLT_D2_ACPI (1 << 8)
# define LOW_VOLT_D3_ACPI (1 << 9)
# define VOLT_PWRMGT_EN (1 << 10)
# define BACKBIAS_PAD_EN (1 << 18)
# define BACKBIAS_VALUE (1 << 19)
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
# define AC_DC_SW (1 << 24)
 
#define SCLK_PWRMGT_CNTL 0x644
# define SCLK_PWRMGT_OFF (1 << 0)
# define SCLK_LOW_D1 (1 << 1)
# define FIR_RESET (1 << 4)
# define FIR_FORCE_TREND_SEL (1 << 5)
# define FIR_TREND_MODE (1 << 6)
# define DYN_GFX_CLK_OFF_EN (1 << 7)
# define GFX_CLK_FORCE_ON (1 << 8)
# define GFX_CLK_REQUEST_OFF (1 << 9)
# define GFX_CLK_FORCE_OFF (1 << 10)
# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
# define DYN_LIGHT_SLEEP_EN (1 << 14)
#define MCLK_PWRMGT_CNTL 0x648
# define DLL_SPEED(x) ((x) << 0)
# define DLL_SPEED_MASK (0x1f << 0)
# define MPLL_PWRMGT_OFF (1 << 5)
# define DLL_READY (1 << 6)
# define MC_INT_CNTL (1 << 7)
# define MRDCKA0_PDNB (1 << 8)
# define MRDCKA1_PDNB (1 << 9)
# define MRDCKB0_PDNB (1 << 10)
# define MRDCKB1_PDNB (1 << 11)
# define MRDCKC0_PDNB (1 << 12)
# define MRDCKC1_PDNB (1 << 13)
# define MRDCKD0_PDNB (1 << 14)
# define MRDCKD1_PDNB (1 << 15)
# define MRDCKA0_RESET (1 << 16)
# define MRDCKA1_RESET (1 << 17)
# define MRDCKB0_RESET (1 << 18)
# define MRDCKB1_RESET (1 << 19)
# define MRDCKC0_RESET (1 << 20)
# define MRDCKC1_RESET (1 << 21)
# define MRDCKD0_RESET (1 << 22)
# define MRDCKD1_RESET (1 << 23)
# define DLL_READY_READ (1 << 24)
# define USE_DISPLAY_GAP (1 << 25)
# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
# define MPLL_TURNOFF_D2 (1 << 28)
#define DLL_CNTL 0x64c
# define MRDCKA0_BYPASS (1 << 24)
# define MRDCKA1_BYPASS (1 << 25)
# define MRDCKB0_BYPASS (1 << 26)
# define MRDCKB1_BYPASS (1 << 27)
# define MRDCKC0_BYPASS (1 << 28)
# define MRDCKC1_BYPASS (1 << 29)
# define MRDCKD0_BYPASS (1 << 30)
# define MRDCKD1_BYPASS (1 << 31)
 
#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
# define CURRENT_STATE_INDEX_MASK (0xf << 4)
# define CURRENT_STATE_INDEX_SHIFT 4
 
#define CG_AT 0x6d4
# define CG_R(x) ((x) << 0)
# define CG_R_MASK (0xffff << 0)
# define CG_L(x) ((x) << 16)
# define CG_L_MASK (0xffff << 16)
 
#define CG_BIF_REQ_AND_RSP 0x7f4
#define CG_CLIENT_REQ(x) ((x) << 0)
#define CG_CLIENT_REQ_MASK (0xff << 0)
#define CG_CLIENT_REQ_SHIFT 0
#define CG_CLIENT_RESP(x) ((x) << 8)
#define CG_CLIENT_RESP_MASK (0xff << 8)
#define CG_CLIENT_RESP_SHIFT 8
#define CLIENT_CG_REQ(x) ((x) << 16)
#define CLIENT_CG_REQ_MASK (0xff << 16)
#define CLIENT_CG_REQ_SHIFT 16
#define CLIENT_CG_RESP(x) ((x) << 24)
#define CLIENT_CG_RESP_MASK (0xff << 24)
#define CLIENT_CG_RESP_SHIFT 24
 
#define CG_SPLL_SPREAD_SPECTRUM 0x790
#define SSEN (1 << 0)
#define CLK_S(x) ((x) << 4)
#define CLK_S_MASK (0xfff << 4)
#define CLK_S_SHIFT 4
#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
#define CLK_V(x) ((x) << 0)
#define CLK_V_MASK (0x3ffffff << 0)
#define CLK_V_SHIFT 0
 
#define SMC_SCRATCH0 0x81c
 
#define CG_SPLL_FUNC_CNTL_4 0x850
 
#define MPLL_SS1 0x85c
#define CLKV(x) ((x) << 0)
#define CLKV_MASK (0x3ffffff << 0)
#define MPLL_SS2 0x860
#define CLKS(x) ((x) << 0)
#define CLKS_MASK (0xfff << 0)
 
#define CG_CAC_CTRL 0x88c
#define TID_CNT(x) ((x) << 0)
#define TID_CNT_MASK (0x3fff << 0)
#define TID_UNIT(x) ((x) << 14)
#define TID_UNIT_MASK (0xf << 14)
 
#define CG_IND_ADDR 0x8f8
#define CG_IND_DATA 0x8fc
/* CGIND regs */
#define CG_CGTT_LOCAL_0 0x00
#define CG_CGTT_LOCAL_1 0x01
 
#define MC_CG_CONFIG 0x25bc
#define MCDW_WR_ENABLE (1 << 0)
#define MCDX_WR_ENABLE (1 << 1)
#define MCDY_WR_ENABLE (1 << 2)
#define MCDZ_WR_ENABLE (1 << 3)
#define MC_RD_ENABLE(x) ((x) << 4)
#define MC_RD_ENABLE_MASK (3 << 4)
#define INDEX(x) ((x) << 6)
#define INDEX_MASK (0xfff << 6)
#define INDEX_SHIFT 6
 
#define MC_ARB_CAC_CNTL 0x2750
#define ENABLE (1 << 0)
#define READ_WEIGHT(x) ((x) << 1)
#define READ_WEIGHT_MASK (0x3f << 1)
#define READ_WEIGHT_SHIFT 1
#define WRITE_WEIGHT(x) ((x) << 7)
#define WRITE_WEIGHT_MASK (0x3f << 7)
#define WRITE_WEIGHT_SHIFT 7
#define ALLOW_OVERFLOW (1 << 13)
 
#define MC_ARB_DRAM_TIMING 0x2774
#define MC_ARB_DRAM_TIMING2 0x2778
 
#define MC_ARB_RFSH_RATE 0x27b0
#define POWERMODE0(x) ((x) << 0)
#define POWERMODE0_MASK (0xff << 0)
#define POWERMODE0_SHIFT 0
#define POWERMODE1(x) ((x) << 8)
#define POWERMODE1_MASK (0xff << 8)
#define POWERMODE1_SHIFT 8
#define POWERMODE2(x) ((x) << 16)
#define POWERMODE2_MASK (0xff << 16)
#define POWERMODE2_SHIFT 16
#define POWERMODE3(x) ((x) << 24)
#define POWERMODE3_MASK (0xff << 24)
#define POWERMODE3_SHIFT 24
 
#define MC_ARB_CG 0x27e8
#define CG_ARB_REQ(x) ((x) << 0)
#define CG_ARB_REQ_MASK (0xff << 0)
#define CG_ARB_REQ_SHIFT 0
#define CG_ARB_RESP(x) ((x) << 8)
#define CG_ARB_RESP_MASK (0xff << 8)
#define CG_ARB_RESP_SHIFT 8
#define ARB_CG_REQ(x) ((x) << 16)
#define ARB_CG_REQ_MASK (0xff << 16)
#define ARB_CG_REQ_SHIFT 16
#define ARB_CG_RESP(x) ((x) << 24)
#define ARB_CG_RESP_MASK (0xff << 24)
#define ARB_CG_RESP_SHIFT 24
 
#define MC_ARB_DRAM_TIMING_1 0x27f0
#define MC_ARB_DRAM_TIMING_2 0x27f4
#define MC_ARB_DRAM_TIMING_3 0x27f8
#define MC_ARB_DRAM_TIMING2_1 0x27fc
#define MC_ARB_DRAM_TIMING2_2 0x2800
#define MC_ARB_DRAM_TIMING2_3 0x2804
#define MC_ARB_BURST_TIME 0x2808
#define STATE0(x) ((x) << 0)
#define STATE0_MASK (0x1f << 0)
#define STATE0_SHIFT 0
#define STATE1(x) ((x) << 5)
#define STATE1_MASK (0x1f << 5)
#define STATE1_SHIFT 5
#define STATE2(x) ((x) << 10)
#define STATE2_MASK (0x1f << 10)
#define STATE2_SHIFT 10
#define STATE3(x) ((x) << 15)
#define STATE3_MASK (0x1f << 15)
#define STATE3_SHIFT 15
 
#define MC_CG_DATAPORT 0x2884
 
#define MC_SEQ_RAS_TIMING 0x28a0
#define MC_SEQ_CAS_TIMING 0x28a4
#define MC_SEQ_MISC_TIMING 0x28a8
#define MC_SEQ_MISC_TIMING2 0x28ac
#define MC_SEQ_PMG_TIMING 0x28b0
#define MC_SEQ_RD_CTL_D0 0x28b4
#define MC_SEQ_RD_CTL_D1 0x28b8
#define MC_SEQ_WR_CTL_D0 0x28bc
#define MC_SEQ_WR_CTL_D1 0x28c0
 
#define MC_SEQ_MISC0 0x2a00
#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5
#define MC_SEQ_MISC1 0x2a04
#define MC_SEQ_RESERVE_M 0x2a08
#define MC_PMG_CMD_EMRS 0x2a0c
 
#define MC_SEQ_MISC3 0x2a2c
 
#define MC_SEQ_MISC5 0x2a54
#define MC_SEQ_MISC6 0x2a58
 
#define MC_SEQ_MISC7 0x2a64
 
#define MC_SEQ_RAS_TIMING_LP 0x2a6c
#define MC_SEQ_CAS_TIMING_LP 0x2a70
#define MC_SEQ_MISC_TIMING_LP 0x2a74
#define MC_SEQ_MISC_TIMING2_LP 0x2a78
#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
#define MC_SEQ_WR_CTL_D1_LP 0x2a80
#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
 
#define MC_PMG_CMD_MRS 0x2aac
 
#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
#define MC_SEQ_RD_CTL_D1_LP 0x2b20
 
#define MC_PMG_CMD_MRS1 0x2b44
#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
#define MC_SEQ_PMG_TIMING_LP 0x2b4c
 
#define MC_PMG_CMD_MRS2 0x2b5c
#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
 
#define LB_SYNC_RESET_SEL 0x6b28
#define LB_SYNC_RESET_SEL_MASK (3 << 0)
#define LB_SYNC_RESET_SEL_SHIFT 0
 
#define DC_STUTTER_CNTL 0x6b30
#define DC_STUTTER_ENABLE_A (1 << 0)
#define DC_STUTTER_ENABLE_B (1 << 1)
 
#define SQ_CAC_THRESHOLD 0x8e4c
#define VSP(x) ((x) << 0)
#define VSP_MASK (0xff << 0)
#define VSP_SHIFT 0
#define VSP0(x) ((x) << 8)
#define VSP0_MASK (0xff << 8)
#define VSP0_SHIFT 8
#define GPR(x) ((x) << 16)
#define GPR_MASK (0xff << 16)
#define GPR_SHIFT 16
 
#define SQ_POWER_THROTTLE 0x8e58
#define MIN_POWER(x) ((x) << 0)
#define MIN_POWER_MASK (0x3fff << 0)
#define MIN_POWER_SHIFT 0
#define MAX_POWER(x) ((x) << 16)
#define MAX_POWER_MASK (0x3fff << 16)
#define MAX_POWER_SHIFT 0
#define SQ_POWER_THROTTLE2 0x8e5c
#define MAX_POWER_DELTA(x) ((x) << 0)
#define MAX_POWER_DELTA_MASK (0x3fff << 0)
#define MAX_POWER_DELTA_SHIFT 0
#define STI_SIZE(x) ((x) << 16)
#define STI_SIZE_MASK (0x3ff << 16)
#define STI_SIZE_SHIFT 16
#define LTI_RATIO(x) ((x) << 27)
#define LTI_RATIO_MASK (0xf << 27)
#define LTI_RATIO_SHIFT 27
 
/* CG indirect registers */
#define CG_CAC_REGION_1_WEIGHT_0 0x83
#define WEIGHT_TCP_SIG0(x) ((x) << 0)
#define WEIGHT_TCP_SIG0_MASK (0x3f << 0)
#define WEIGHT_TCP_SIG0_SHIFT 0
#define WEIGHT_TCP_SIG1(x) ((x) << 6)
#define WEIGHT_TCP_SIG1_MASK (0x3f << 6)
#define WEIGHT_TCP_SIG1_SHIFT 6
#define WEIGHT_TA_SIG(x) ((x) << 12)
#define WEIGHT_TA_SIG_MASK (0x3f << 12)
#define WEIGHT_TA_SIG_SHIFT 12
#define CG_CAC_REGION_1_WEIGHT_1 0x84
#define WEIGHT_TCC_EN0(x) ((x) << 0)
#define WEIGHT_TCC_EN0_MASK (0x3f << 0)
#define WEIGHT_TCC_EN0_SHIFT 0
#define WEIGHT_TCC_EN1(x) ((x) << 6)
#define WEIGHT_TCC_EN1_MASK (0x3f << 6)
#define WEIGHT_TCC_EN1_SHIFT 6
#define WEIGHT_TCC_EN2(x) ((x) << 12)
#define WEIGHT_TCC_EN2_MASK (0x3f << 12)
#define WEIGHT_TCC_EN2_SHIFT 12
#define WEIGHT_TCC_EN3(x) ((x) << 18)
#define WEIGHT_TCC_EN3_MASK (0x3f << 18)
#define WEIGHT_TCC_EN3_SHIFT 18
#define CG_CAC_REGION_2_WEIGHT_0 0x85
#define WEIGHT_CB_EN0(x) ((x) << 0)
#define WEIGHT_CB_EN0_MASK (0x3f << 0)
#define WEIGHT_CB_EN0_SHIFT 0
#define WEIGHT_CB_EN1(x) ((x) << 6)
#define WEIGHT_CB_EN1_MASK (0x3f << 6)
#define WEIGHT_CB_EN1_SHIFT 6
#define WEIGHT_CB_EN2(x) ((x) << 12)
#define WEIGHT_CB_EN2_MASK (0x3f << 12)
#define WEIGHT_CB_EN2_SHIFT 12
#define WEIGHT_CB_EN3(x) ((x) << 18)
#define WEIGHT_CB_EN3_MASK (0x3f << 18)
#define WEIGHT_CB_EN3_SHIFT 18
#define CG_CAC_REGION_2_WEIGHT_1 0x86
#define WEIGHT_DB_SIG0(x) ((x) << 0)
#define WEIGHT_DB_SIG0_MASK (0x3f << 0)
#define WEIGHT_DB_SIG0_SHIFT 0
#define WEIGHT_DB_SIG1(x) ((x) << 6)
#define WEIGHT_DB_SIG1_MASK (0x3f << 6)
#define WEIGHT_DB_SIG1_SHIFT 6
#define WEIGHT_DB_SIG2(x) ((x) << 12)
#define WEIGHT_DB_SIG2_MASK (0x3f << 12)
#define WEIGHT_DB_SIG2_SHIFT 12
#define WEIGHT_DB_SIG3(x) ((x) << 18)
#define WEIGHT_DB_SIG3_MASK (0x3f << 18)
#define WEIGHT_DB_SIG3_SHIFT 18
#define CG_CAC_REGION_2_WEIGHT_2 0x87
#define WEIGHT_SXM_SIG0(x) ((x) << 0)
#define WEIGHT_SXM_SIG0_MASK (0x3f << 0)
#define WEIGHT_SXM_SIG0_SHIFT 0
#define WEIGHT_SXM_SIG1(x) ((x) << 6)
#define WEIGHT_SXM_SIG1_MASK (0x3f << 6)
#define WEIGHT_SXM_SIG1_SHIFT 6
#define WEIGHT_SXM_SIG2(x) ((x) << 12)
#define WEIGHT_SXM_SIG2_MASK (0x3f << 12)
#define WEIGHT_SXM_SIG2_SHIFT 12
#define WEIGHT_SXS_SIG0(x) ((x) << 18)
#define WEIGHT_SXS_SIG0_MASK (0x3f << 18)
#define WEIGHT_SXS_SIG0_SHIFT 18
#define WEIGHT_SXS_SIG1(x) ((x) << 24)
#define WEIGHT_SXS_SIG1_MASK (0x3f << 24)
#define WEIGHT_SXS_SIG1_SHIFT 24
#define CG_CAC_REGION_3_WEIGHT_0 0x88
#define WEIGHT_XBR_0(x) ((x) << 0)
#define WEIGHT_XBR_0_MASK (0x3f << 0)
#define WEIGHT_XBR_0_SHIFT 0
#define WEIGHT_XBR_1(x) ((x) << 6)
#define WEIGHT_XBR_1_MASK (0x3f << 6)
#define WEIGHT_XBR_1_SHIFT 6
#define WEIGHT_XBR_2(x) ((x) << 12)
#define WEIGHT_XBR_2_MASK (0x3f << 12)
#define WEIGHT_XBR_2_SHIFT 12
#define WEIGHT_SPI_SIG0(x) ((x) << 18)
#define WEIGHT_SPI_SIG0_MASK (0x3f << 18)
#define WEIGHT_SPI_SIG0_SHIFT 18
#define CG_CAC_REGION_3_WEIGHT_1 0x89
#define WEIGHT_SPI_SIG1(x) ((x) << 0)
#define WEIGHT_SPI_SIG1_MASK (0x3f << 0)
#define WEIGHT_SPI_SIG1_SHIFT 0
#define WEIGHT_SPI_SIG2(x) ((x) << 6)
#define WEIGHT_SPI_SIG2_MASK (0x3f << 6)
#define WEIGHT_SPI_SIG2_SHIFT 6
#define WEIGHT_SPI_SIG3(x) ((x) << 12)
#define WEIGHT_SPI_SIG3_MASK (0x3f << 12)
#define WEIGHT_SPI_SIG3_SHIFT 12
#define WEIGHT_SPI_SIG4(x) ((x) << 18)
#define WEIGHT_SPI_SIG4_MASK (0x3f << 18)
#define WEIGHT_SPI_SIG4_SHIFT 18
#define WEIGHT_SPI_SIG5(x) ((x) << 24)
#define WEIGHT_SPI_SIG5_MASK (0x3f << 24)
#define WEIGHT_SPI_SIG5_SHIFT 24
#define CG_CAC_REGION_4_WEIGHT_0 0x8a
#define WEIGHT_LDS_SIG0(x) ((x) << 0)
#define WEIGHT_LDS_SIG0_MASK (0x3f << 0)
#define WEIGHT_LDS_SIG0_SHIFT 0
#define WEIGHT_LDS_SIG1(x) ((x) << 6)
#define WEIGHT_LDS_SIG1_MASK (0x3f << 6)
#define WEIGHT_LDS_SIG1_SHIFT 6
#define WEIGHT_SC(x) ((x) << 24)
#define WEIGHT_SC_MASK (0x3f << 24)
#define WEIGHT_SC_SHIFT 24
#define CG_CAC_REGION_4_WEIGHT_1 0x8b
#define WEIGHT_BIF(x) ((x) << 0)
#define WEIGHT_BIF_MASK (0x3f << 0)
#define WEIGHT_BIF_SHIFT 0
#define WEIGHT_CP(x) ((x) << 6)
#define WEIGHT_CP_MASK (0x3f << 6)
#define WEIGHT_CP_SHIFT 6
#define WEIGHT_PA_SIG0(x) ((x) << 12)
#define WEIGHT_PA_SIG0_MASK (0x3f << 12)
#define WEIGHT_PA_SIG0_SHIFT 12
#define WEIGHT_PA_SIG1(x) ((x) << 18)
#define WEIGHT_PA_SIG1_MASK (0x3f << 18)
#define WEIGHT_PA_SIG1_SHIFT 18
#define WEIGHT_VGT_SIG0(x) ((x) << 24)
#define WEIGHT_VGT_SIG0_MASK (0x3f << 24)
#define WEIGHT_VGT_SIG0_SHIFT 24
#define CG_CAC_REGION_4_WEIGHT_2 0x8c
#define WEIGHT_VGT_SIG1(x) ((x) << 0)
#define WEIGHT_VGT_SIG1_MASK (0x3f << 0)
#define WEIGHT_VGT_SIG1_SHIFT 0
#define WEIGHT_VGT_SIG2(x) ((x) << 6)
#define WEIGHT_VGT_SIG2_MASK (0x3f << 6)
#define WEIGHT_VGT_SIG2_SHIFT 6
#define WEIGHT_DC_SIG0(x) ((x) << 12)
#define WEIGHT_DC_SIG0_MASK (0x3f << 12)
#define WEIGHT_DC_SIG0_SHIFT 12
#define WEIGHT_DC_SIG1(x) ((x) << 18)
#define WEIGHT_DC_SIG1_MASK (0x3f << 18)
#define WEIGHT_DC_SIG1_SHIFT 18
#define WEIGHT_DC_SIG2(x) ((x) << 24)
#define WEIGHT_DC_SIG2_MASK (0x3f << 24)
#define WEIGHT_DC_SIG2_SHIFT 24
#define CG_CAC_REGION_4_WEIGHT_3 0x8d
#define WEIGHT_DC_SIG3(x) ((x) << 0)
#define WEIGHT_DC_SIG3_MASK (0x3f << 0)
#define WEIGHT_DC_SIG3_SHIFT 0
#define WEIGHT_UVD_SIG0(x) ((x) << 6)
#define WEIGHT_UVD_SIG0_MASK (0x3f << 6)
#define WEIGHT_UVD_SIG0_SHIFT 6
#define WEIGHT_UVD_SIG1(x) ((x) << 12)
#define WEIGHT_UVD_SIG1_MASK (0x3f << 12)
#define WEIGHT_UVD_SIG1_SHIFT 12
#define WEIGHT_SPARE0(x) ((x) << 18)
#define WEIGHT_SPARE0_MASK (0x3f << 18)
#define WEIGHT_SPARE0_SHIFT 18
#define WEIGHT_SPARE1(x) ((x) << 24)
#define WEIGHT_SPARE1_MASK (0x3f << 24)
#define WEIGHT_SPARE1_SHIFT 24
#define CG_CAC_REGION_5_WEIGHT_0 0x8e
#define WEIGHT_SQ_VSP(x) ((x) << 0)
#define WEIGHT_SQ_VSP_MASK (0x3fff << 0)
#define WEIGHT_SQ_VSP_SHIFT 0
#define WEIGHT_SQ_VSP0(x) ((x) << 14)
#define WEIGHT_SQ_VSP0_MASK (0x3fff << 14)
#define WEIGHT_SQ_VSP0_SHIFT 14
#define CG_CAC_REGION_4_OVERRIDE_4 0xab
#define OVR_MODE_SPARE_0(x) ((x) << 16)
#define OVR_MODE_SPARE_0_MASK (0x1 << 16)
#define OVR_MODE_SPARE_0_SHIFT 16
#define OVR_VAL_SPARE_0(x) ((x) << 17)
#define OVR_VAL_SPARE_0_MASK (0x1 << 17)
#define OVR_VAL_SPARE_0_SHIFT 17
#define OVR_MODE_SPARE_1(x) ((x) << 18)
#define OVR_MODE_SPARE_1_MASK (0x3f << 18)
#define OVR_MODE_SPARE_1_SHIFT 18
#define OVR_VAL_SPARE_1(x) ((x) << 19)
#define OVR_VAL_SPARE_1_MASK (0x3f << 19)
#define OVR_VAL_SPARE_1_SHIFT 19
#define CG_CAC_REGION_5_WEIGHT_1 0xb7
#define WEIGHT_SQ_GPR(x) ((x) << 0)
#define WEIGHT_SQ_GPR_MASK (0x3fff << 0)
#define WEIGHT_SQ_GPR_SHIFT 0
#define WEIGHT_SQ_LDS(x) ((x) << 14)
#define WEIGHT_SQ_LDS_MASK (0x3fff << 14)
#define WEIGHT_SQ_LDS_SHIFT 14
 
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_SHIFT 0
# define LC_LINK_WIDTH_MASK 0x7
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
# define LC_LINK_WIDTH_RD_SHIFT 4
# define LC_LINK_WIDTH_RD_MASK 0x70
# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
# define LC_RECONFIG_NOW (1 << 8)
# define LC_RENEGOTIATION_SUPPORT (1 << 9)
# define LC_RENEGOTIATE_EN (1 << 10)
# define LC_SHORT_RECONFIG_EN (1 << 11)
# define LC_UPCONFIGURE_SUPPORT (1 << 12)
# define LC_UPCONFIGURE_DIS (1 << 13)
#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
# define LC_GEN2_EN_STRAP (1 << 0)
# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
# define LC_CURRENT_DATA_RATE (1 << 11)
# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
#define MM_CFGREGS_CNTL 0x544c
# define MM_WR_TO_CFG_EN (1 << 3)
#define LINK_CNTL2 0x88 /* F0 */
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
 
/*
* UVD
*/
1155,7 → 573,6
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
# define PACKET3_SX_ACTION_ENA (1 << 28)
# define PACKET3_ENGINE_ME (1 << 31)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
/drivers/video/drm/radeon/pci.c
8,12 → 8,6
#include <pci.h>
#include <syscall.h>
 
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
 
extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);
 
static LIST_HEAD(devices);
1057,52 → 1051,3
}
EXPORT_SYMBOL(pcie_capability_write_dword);
 
int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
u16 clear, u16 set)
{
int ret;
u16 val;
 
ret = pcie_capability_read_word(dev, pos, &val);
if (!ret) {
val &= ~clear;
val |= set;
ret = pcie_capability_write_word(dev, pos, val);
}
 
return ret;
}
 
 
 
int pcie_get_readrq(struct pci_dev *dev)
{
u16 ctl;
 
pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
 
return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
}
EXPORT_SYMBOL(pcie_get_readrq);
 
/**
* pcie_set_readrq - set PCI Express maximum memory read request
* @dev: PCI device to configure
* @rq: maximum memory read count in bytes
* valid values are 128, 256, 512, 1024, 2048, 4096
*
* If possible, sets the maximum memory read request in bytes.
*/
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
u16 v;
 
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
return -EINVAL;
 
v = (ffs(rq) - 8) << 12;
 
return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_READRQ, v);
}
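 
/* Editorial worked example (not driver code): rq = 512 gives
* ffs(512) = 10, so v = (10 - 8) << 12 = 0x2000; pcie_get_readrq()
* decodes this back as 128 << (0x2000 >> 12) = 128 << 2 = 512 bytes.
*/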
 
/drivers/video/drm/radeon/r100.c
61,7 → 61,6
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);
 
#include "r100_track.h"
 
/* This files gather functions specifics to:
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
140,20 → 139,7
}
}
}
 
/**
* r100_page_flip - pageflip callback.
*
* @rdev: radeon_device pointer
* @crtc_id: crtc to cleanup pageflip on
* @crtc_base: new address of the crtc (GPU MC address)
*
* Does the actual pageflip (r1xx-r4xx).
* During vblank we take the crtc lock and wait for the update_pending
* bit to go high, when it does, we release the lock, and allow the
* double buffered update to take place.
*/
void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
175,334 → 161,9
tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
}
 
/**
* r100_page_flip_pending - check if page flip is still pending
*
* @rdev: radeon_device pointer
* @crtc_id: crtc to check
*
* Check if the last pageflip is still pending (r1xx-r4xx).
* Returns the current update pending status.
*/
bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 
/* Return current update_pending status: */
return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
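 
/* Editorial usage sketch (hypothetical caller, not in the driver): the
* vblank handler can poll this until the double-buffered update described
* above has latched:
*
*     while (r100_page_flip_pending(rdev, crtc_id))
*         udelay(1);
*/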
 
/**
* r100_pm_get_dynpm_state - look up dynpm power state callback.
*
* @rdev: radeon_device pointer
*
* Look up the optimal power state based on the
* current state of the GPU (r1xx-r5xx).
* Used for dynpm only.
*/
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
 
switch (rdev->pm.dynpm_planned_action) {
case DYNPM_ACTION_MINIMUM:
rdev->pm.requested_power_state_index = 0;
rdev->pm.dynpm_can_downclock = false;
break;
case DYNPM_ACTION_DOWNCLOCK:
if (rdev->pm.current_power_state_index == 0) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_downclock = false;
} else {
if (rdev->pm.active_crtc_count > 1) {
for (i = 0; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if (i >= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
break;
} else {
rdev->pm.requested_power_state_index = i;
break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index - 1;
}
/* don't use the power state if crtcs are active and no display flag is set */
if ((rdev->pm.active_crtc_count > 0) &&
(rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
RADEON_PM_MODE_NO_DISPLAY)) {
rdev->pm.requested_power_state_index++;
}
break;
case DYNPM_ACTION_UPCLOCK:
if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_upclock = false;
} else {
if (rdev->pm.active_crtc_count > 1) {
for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if (i <= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
break;
} else {
rdev->pm.requested_power_state_index = i;
break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index + 1;
}
break;
case DYNPM_ACTION_DEFAULT:
rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.dynpm_can_upclock = false;
break;
case DYNPM_ACTION_NONE:
default:
DRM_ERROR("Requested mode for not defined action\n");
return;
}
/* only one clock mode per power state */
rdev->pm.requested_clock_mode_index = 0;
 
DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
pcie_lanes);
}
 
/**
* r100_pm_init_profile - Initialize power profiles callback.
*
* @rdev: radeon_device pointer
*
* Initialize the power states used in profile mode
* (r1xx-r3xx).
* Used for profile mode only.
*/
void r100_pm_init_profile(struct radeon_device *rdev)
{
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* mid sh */
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* mid mh */
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
 
/**
* r100_pm_misc - set additional pm hw parameters callback.
*
* @rdev: radeon_device pointer
*
* Set non-clock parameters associated with a power state
* (voltage, pcie lanes, etc.) (r1xx-r4xx).
*/
void r100_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
 
if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
tmp = RREG32(voltage->gpio.reg);
if (voltage->active_high)
tmp |= voltage->gpio.mask;
else
tmp &= ~(voltage->gpio.mask);
WREG32(voltage->gpio.reg, tmp);
if (voltage->delay)
udelay(voltage->delay);
} else {
tmp = RREG32(voltage->gpio.reg);
if (voltage->active_high)
tmp &= ~voltage->gpio.mask;
else
tmp |= voltage->gpio.mask;
WREG32(voltage->gpio.reg, tmp);
if (voltage->delay)
udelay(voltage->delay);
}
}
 
sclk_cntl = RREG32_PLL(SCLK_CNTL);
sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
else
sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
} else
sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
 
if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
if (voltage->delay) {
sclk_more_cntl |= VOLTAGE_DROP_SYNC;
switch (voltage->delay) {
case 33:
sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
break;
case 66:
sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
break;
case 99:
sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
break;
case 132:
sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
break;
}
} else
sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
} else
sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
 
if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
sclk_cntl &= ~FORCE_HDP;
else
sclk_cntl |= FORCE_HDP;
 
WREG32_PLL(SCLK_CNTL, sclk_cntl);
WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
 
/* set pcie lanes */
if ((rdev->flags & RADEON_IS_PCIE) &&
!(rdev->flags & RADEON_IS_IGP) &&
rdev->asic->pm.set_pcie_lanes &&
(ps->pcie_lanes !=
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
ps->pcie_lanes);
DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
}
}
 
/**
* r100_pm_prepare - pre-power state change callback.
*
* @rdev: radeon_device pointer
*
* Prepare for a power state change (r1xx-r4xx).
*/
void r100_pm_prepare(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
 
/* disable any active CRTCs */
list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (radeon_crtc->enabled) {
if (radeon_crtc->crtc_id) {
tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
} else {
tmp = RREG32(RADEON_CRTC_GEN_CNTL);
tmp |= RADEON_CRTC_DISP_REQ_EN_B;
WREG32(RADEON_CRTC_GEN_CNTL, tmp);
}
}
}
}
 
/**
* r100_pm_finish - post-power state change callback.
*
* @rdev: radeon_device pointer
*
* Clean up after a power state change (r1xx-r4xx).
*/
void r100_pm_finish(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
 
/* enable any active CRTCs */
list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (radeon_crtc->enabled) {
if (radeon_crtc->crtc_id) {
tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
} else {
tmp = RREG32(RADEON_CRTC_GEN_CNTL);
tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
WREG32(RADEON_CRTC_GEN_CNTL, tmp);
}
}
}
}
 
/**
* r100_gui_idle - gui idle callback.
*
* @rdev: radeon_device pointer
*
* Check if the GUI (2D/3D engines) are idle (r1xx-r5xx).
* Returns true if idle, false if not.
*/
bool r100_gui_idle(struct radeon_device *rdev)
{
if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
652,6 → 313,7
{
uint32_t tmp;
 
radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
681,11 → 343,15
WREG32(RADEON_AIC_HI_ADDR, 0);
}
 
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
u32 *gtt = rdev->gart.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
gtt[i] = cpu_to_le32(lower_32_bits(addr));
return 0;
}
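 
/* Editorial note (not driver code): the r1xx PCI GART table is a flat
* array of little-endian 32-bit entries, one per GPU page, so updating a
* page is just storing the low 32 bits of the bus address at index i.
*/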
 
void r100_pci_gart_fini(struct radeon_device *rdev)
837,7 → 503,11
/* Wait until IDLE & CLEAN */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
r100_ring_hdp_flush(rdev, ring);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
845,7 → 515,7
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
 
bool r100_semaphore_ring_emit(struct radeon_device *rdev,
void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
852,7 → 522,6
{
/* Unused on older asics, since we don't have semaphores or multiple rings */
BUG();
return false;
}
 
int r100_copy_blit(struct radeon_device *rdev,
925,7 → 594,7
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
return r;
}
 
958,7 → 627,7
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
}
 
 
965,11 → 634,18
/* Load the microcode for the CP */
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
struct platform_device *pdev;
const char *fw_name = NULL;
int err;
 
DRM_DEBUG_KMS("\n");
 
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
err = IS_ERR(pdev);
if (err) {
printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
return -EINVAL;
}
if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
(rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
(rdev->family == CHIP_RS200)) {
1011,7 → 687,8
fw_name = FIRMWARE_R520;
}
 
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
platform_device_unregister(pdev);
if (err) {
printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
fw_name);
1026,50 → 703,6
return err;
}
 
u32 r100_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
u32 rptr;
 
if (rdev->wb.enabled)
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
else
rptr = RREG32(RADEON_CP_RB_RPTR);
 
return rptr;
}
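 
/* Editorial note (not driver code): rptr_offs is a byte offset into the
* writeback page, while wb.wb[] is a u32 array, hence the /4 above.
*/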
 
u32 r100_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
u32 wptr;
 
wptr = RREG32(RADEON_CP_RB_WPTR);
 
return wptr;
}
 
void r100_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
WREG32(RADEON_CP_RB_WPTR, ring->wptr);
(void)RREG32(RADEON_CP_RB_WPTR);
}
 
/**
* r100_ring_hdp_flush - flush Host Data Path via the ring buffer
* @rdev: radeon device structure
* @ring: ring buffer struct for emitting packets
*/
void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
}
 
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
1118,11 → 751,12
}
 
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 8);
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_PACKET2);
RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
0, 0x7fffff, RADEON_CP_PACKET2);
if (r) {
return r;
}
1183,6 → 817,7
 
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
ring->rptr = RREG32(RADEON_CP_RB_RPTR);
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
1236,6 → 871,7
}
}
 
#if 0
/*
* CS functions
*/
1260,12 → 896,12
 
value = radeon_get_ib_value(p, idx);
tmp = value & 0x003fffff;
tmp += (((u32)reloc->gpu_offset) >> 10);
tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
 
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->tiling_flags & RADEON_TILING_MACRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_DST_TILE_MACRO;
if (reloc->tiling_flags & RADEON_TILING_MICRO) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
radeon_cs_dump_packet(p, pkt);
1311,7 → 947,7
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
 
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
1323,7 → 959,7
radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = idx_value >> 24;
track->arrays[i + 1].esize &= 0x7F;
1337,7 → 973,7
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].esize &= 0x7F;
1390,7 → 1026,68
return 0;
}
 
void r100_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
volatile uint32_t *ib;
unsigned i;
unsigned idx;
 
ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++) {
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}
}
 
/**
* r100_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
 * @pkt: where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size or if the packet type is unknown.
**/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx)
{
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
uint32_t header;
 
if (idx >= ib_chunk->length_dw) {
DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
idx, ib_chunk->length_dw);
return -EINVAL;
}
header = radeon_get_ib_value(p, idx);
pkt->idx = idx;
pkt->type = CP_PACKET_GET_TYPE(header);
pkt->count = CP_PACKET_GET_COUNT(header);
switch (pkt->type) {
case PACKET_TYPE0:
pkt->reg = CP_PACKET0_GET_REG(header);
pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
break;
case PACKET_TYPE3:
pkt->opcode = CP_PACKET3_GET_OPCODE(header);
break;
case PACKET_TYPE2:
pkt->count = -1;
break;
default:
DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
return -EINVAL;
}
if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
return -EINVAL;
}
return 0;
}
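/* For orientation, the header fields decoded above follow the usual PM4
 * layout on r1xx parts: packet type in bits 31:30, (dword count - 1) in
 * bits 29:16 and, for type-0 packets, the register dword index in the low
 * bits. A minimal decode sketch - helper names are hypothetical, the
 * driver itself uses the CP_PACKET*_GET_* macros from r100d.h:
 */
static inline unsigned pm4_hdr_type(u32 header)  { return (header >> 30) & 0x3; }
static inline unsigned pm4_hdr_count(u32 header) { return (header >> 16) & 0x3fff; }
static inline unsigned pm4_hdr_reg(u32 header)   { return (header & 0x1fff) << 2; }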
 
/**
* r100_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
1406,6 → 1103,7
*/
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, waitreloc;
1445,11 → 1143,12
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = R100_CP_PACKET0_GET_REG(header);
crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
if (!crtc) {
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
 
1578,7 → 1277,7
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1591,7 → 1290,7
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_TXOFFSET_0:
case RADEON_PP_TXOFFSET_1:
1605,16 → 1304,16
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->tiling_flags & RADEON_TILING_MACRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_TXO_MACRO_TILE;
if (reloc->tiling_flags & RADEON_TILING_MICRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_TXO_MICRO_TILE_X2;
 
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->gpu_offset);
ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
} else
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
1632,7 → 1331,7
return r;
}
track->textures[0].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[0].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
1650,7 → 1349,7
return r;
}
track->textures[1].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[1].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
1668,7 → 1367,7
return r;
}
track->textures[2].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[2].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
1686,9 → 1385,9
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->tiling_flags & RADEON_TILING_MACRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->tiling_flags & RADEON_TILING_MICRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
 
tmp = idx_value & ~(0x7 << 16);
1756,7 → 1455,7
radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_CNTL:
{
1916,7 → 1615,7
radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) {
return r;
1930,7 → 1629,7
radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
track->num_arrays = 1;
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
 
2431,6 → 2130,7
}
}
}
#endif
 
/*
* Global GPU functions
2506,9 → 2206,11
 
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
radeon_ring_lockup_update(rdev, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
 
2864,28 → 2566,21
 
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
unsigned long flags;
uint32_t data;
 
spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
r100_pll_errata_after_index(rdev);
data = RREG32(RADEON_CLOCK_CNTL_DATA);
r100_pll_errata_after_data(rdev);
spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
return data;
}
 
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
r100_pll_errata_after_index(rdev);
WREG32(RADEON_CLOCK_CNTL_DATA, v);
r100_pll_errata_after_data(rdev);
spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
}
 
static void r100_set_safe_registers(struct radeon_device *rdev)
2944,12 → 2639,10
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
if (ring->ready) {
for (j = 0; j <= count; j++) {
i = (rdp + j) & ring->ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
}
}
return 0;
}
 
3088,10 → 2781,6
flags |= RADEON_SURF_TILE_COLOR_BOTH;
if (tiling_flags & RADEON_TILING_MACRO)
flags |= RADEON_SURF_TILE_COLOR_MACRO;
/* setting pitch to 0 disables tiling */
if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
== 0)
pitch = 0;
} else if (rdev->family <= CHIP_RV280) {
if (tiling_flags & (RADEON_TILING_MACRO))
flags |= R200_SURF_TILE_COLOR_MACRO;
3109,6 → 2798,13
if (tiling_flags & RADEON_TILING_SWAP_32BIT)
flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
 
	/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
if (ASIC_IS_RN50(rdev))
pitch /= 16;
}
 
/* r100/r200 divide by 16 */
if (rdev->family < CHIP_R300)
flags |= pitch / 16;
3204,12 → 2900,12
 
if (rdev->mode_info.crtcs[0]->base.enabled) {
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
}
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
if (rdev->mode_info.crtcs[1]->base.enabled) {
mode2 = &rdev->mode_info.crtcs[1]->base.mode;
pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
}
}
 
3634,7 → 3330,7
}
radeon_ring_write(ring, PACKET0(scratch, 0));
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
3696,7 → 3392,7
ib.ptr[6] = PACKET2(0);
ib.ptr[7] = PACKET2(0);
ib.length_dw = 8;
r = radeon_ib_schedule(rdev, &ib, NULL, false);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
3992,9 → 3688,6
}
r100_set_safe_registers(rdev);
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
4007,6 → 3700,39
return 0;
}
 
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
bool always_indirect)
{
if (reg < rdev->rmmio_size && !always_indirect)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
unsigned long flags;
uint32_t ret;
 
spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
 
return ret;
}
}
 
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
bool always_indirect)
{
if (reg < rdev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
unsigned long flags;
 
spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
}
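/* The indirect access above is the same index/data idiom used for the PLL,
 * MC and PCIE port registers elsewhere in these files: write the register
 * number to an INDEX register, then access the paired DATA register, under
 * a spinlock so two contexts cannot interleave the two steps. A generic
 * sketch (names illustrative, not driver API):
 *
 *	spin_lock_irqsave(&idx_lock, flags);
 *	writel(reg, mmio + INDEX_REG);
 *	val = readl(mmio + DATA_REG);
 *	spin_unlock_irqrestore(&idx_lock, flags);
 */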
 
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
/drivers/video/drm/radeon/r200.c
34,6 → 34,8
#include "r100d.h"
#include "r200_reg_safe.h"
 
#if 0
 
#include "r100_track.h"
 
static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
79,6 → 81,7
vtx_size += 3;
return vtx_size;
}
#endif
 
int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
121,11 → 124,11
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
return r;
}
#if 0
 
 
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
int vtx_size, i, tex_size;
185,7 → 188,7
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
198,7 → 201,7
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R200_PP_TXOFFSET_0:
case R200_PP_TXOFFSET_1:
215,16 → 218,16
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->tiling_flags & RADEON_TILING_MACRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R200_TXO_MACRO_TILE;
if (reloc->tiling_flags & RADEON_TILING_MICRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R200_TXO_MICRO_TILE;
 
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->gpu_offset);
ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
} else
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
268,7 → 271,7
return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
track->tex_dirty = true;
break;
287,9 → 290,9
}
 
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->tiling_flags & RADEON_TILING_MACRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->tiling_flags & RADEON_TILING_MICRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
 
tmp = idx_value & ~(0x7 << 16);
362,7 → 365,7
radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_CNTL:
{
540,6 → 543,7
}
return 0;
}
#endif
 
void r200_set_safe_registers(struct radeon_device *rdev)
{
/drivers/video/drm/radeon/r300.c
34,7 → 34,7
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "r100_track.h"
 
#include "r300d.h"
#include "rv350d.h"
#include "r300_reg_safe.h"
69,27 → 69,24
mb();
}
 
#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
 
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = rdev->gart.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24);
if (flags & RADEON_GART_PAGE_READ)
addr |= R300_PTE_READABLE;
if (flags & RADEON_GART_PAGE_WRITE)
addr |= R300_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
addr |= R300_PTE_UNSNOOPED;
((upper_32_bits(addr) & 0xff) << 24) |
R300_PTE_WRITEABLE | R300_PTE_READABLE;
	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM - so no need
	 * for cpu_to_le32 on VRAM tables */
writel(addr, ((void __iomem *)ptr) + (i * 4));
return 0;
}
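/* Worked example of the packing above (editorial sketch): a 4 KiB page at
 * bus address 0x1_2345_6000 gives lower_32_bits(addr) >> 8 = 0x00234560
 * and (upper_32_bits(addr) & 0xff) << 24 = 0x01000000, so the PTE is
 * 0x01234560 plus the R300_PTE_* permission bits - effectively addr >> 8
 * truncated to 32 bits, which covers bus addresses below 1 TiB (40 bits).
 */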
 
int rv370_pcie_gart_init(struct radeon_device *rdev)
126,6 → 123,7
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
295,7 → 293,7
radeon_ring_write(ring,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
}
 
static void r300_errata(struct radeon_device *rdev)
594,6 → 592,9
#endif
}
 
 
#if 0
 
static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
642,7 → 643,7
track->cb[i].robj = reloc->robj;
track->cb[i].offset = idx_value;
track->cb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
655,7 → 656,7
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_TX_OFFSET_0:
case R300_TX_OFFSET_0+4:
684,16 → 685,16
 
if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
((idx_value & ~31) + (u32)reloc->gpu_offset);
((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
} else {
if (reloc->tiling_flags & RADEON_TILING_MACRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->tiling_flags & RADEON_TILING_MICRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
 
tmp = idx_value + ((u32)reloc->gpu_offset);
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
ib[idx] = tmp;
}
755,11 → 756,11
return r;
}
 
if (reloc->tiling_flags & RADEON_TILING_MACRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_COLOR_TILE_ENABLE;
if (reloc->tiling_flags & RADEON_TILING_MICRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
 
tmp = idx_value & ~(0x7 << 16);
840,11 → 841,11
return r;
}
 
if (reloc->tiling_flags & RADEON_TILING_MACRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
if (reloc->tiling_flags & RADEON_TILING_MICRO)
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED;
else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
 
tmp = idx_value & ~(0x7 << 16);
1054,7 → 1055,7
radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case 0x4e0c:
/* RB3D_COLOR_CHANNEL_MASK */
1099,7 → 1100,7
track->aa.robj = reloc->robj;
track->aa.offset = idx_value;
track->aa_dirty = true;
ib[idx] = idx_value + ((u32)reloc->gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_RB3D_AARESOLVE_PITCH:
track->aa.pitch = idx_value & 0x3FFE;
1164,7 → 1165,7
radeon_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) {
return r;
1286,7 → 1287,9
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
return 0;
}
#endif
 
 
void r300_set_reg_safe(struct radeon_device *rdev)
{
rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
1481,13 → 1484,10
}
r300_set_reg_safe(rdev);
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
/* Something went wrong with the accel init, so stop accel */
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
/drivers/video/drm/radeon/r420.c
160,25 → 160,18
 
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
r = RREG32(R_0001FC_MC_IND_DATA);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
 
void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
S_0001F8_MC_IND_WR_EN(1));
WREG32(R_0001FC_MC_IND_DATA, v);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
 
static void r420_debugfs(struct radeon_device *rdev)
219,7 → 212,7
radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(ring, rdev->config.r300.resync_scratch);
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
}
 
static void r420_cp_errata_fini(struct radeon_device *rdev)
232,7 → 225,7
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
 
377,9 → 370,6
}
r420_set_reg_safe(rdev);
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
/drivers/video/drm/radeon/r500_reg.h
402,7 → 402,6
* block and vice versa. This applies to GRPH, CUR, etc.
*/
#define AVIVO_D1GRPH_LUT_SEL 0x6108
# define AVIVO_LUT_10BIT_BYPASS_EN (1 << 8)
#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
/drivers/video/drm/radeon/r520.c
284,9 → 284,6
return r;
rv515_set_safe_registers(rdev);
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
/drivers/video/drm/radeon/r600.c
37,8 → 37,19
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"
 
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536
 
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
56,32 → 67,24
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
104,8 → 107,6
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
 
/**
* r600_get_xclk - get the xclk
120,64 → 121,6
return rdev->clock.spll.reference_freq;
}
 
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
return 0;
}
 
void dce3_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 0;
u32 tmp = 0;
enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
bpc = radeon_get_monitor_bpc(connector);
dither = radeon_connector->dither;
}
 
/* LVDS FMT is set up by atom */
if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
return;
 
/* not needed for analog */
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
return;
 
if (bpc == 0)
return;
 
switch (bpc) {
case 6:
if (dither == RADEON_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
tmp |= FMT_SPATIAL_DITHER_EN;
else
tmp |= FMT_TRUNCATE_EN;
break;
case 8:
if (dither == RADEON_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
else
tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
break;
case 10:
default:
/* not needed */
break;
}
 
WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}
 
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
191,439 → 134,11
return actual_temp * 1000;
}
 
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
 
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
 
/* power state array is low to high, default is first */
if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
int min_power_state_index = 0;
 
if (rdev->pm.num_power_states > 2)
min_power_state_index = 1;
 
switch (rdev->pm.dynpm_planned_action) {
case DYNPM_ACTION_MINIMUM:
rdev->pm.requested_power_state_index = min_power_state_index;
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_downclock = false;
break;
case DYNPM_ACTION_DOWNCLOCK:
if (rdev->pm.current_power_state_index == min_power_state_index) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_downclock = false;
} else {
if (rdev->pm.active_crtc_count > 1) {
for (i = 0; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if (i >= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index;
break;
} else {
rdev->pm.requested_power_state_index = i;
break;
}
}
} else {
if (rdev->pm.current_power_state_index == 0)
rdev->pm.requested_power_state_index =
rdev->pm.num_power_states - 1;
else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index - 1;
}
}
rdev->pm.requested_clock_mode_index = 0;
/* don't use the power state if crtcs are active and no display flag is set */
if ((rdev->pm.active_crtc_count > 0) &&
(rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].flags &
RADEON_PM_MODE_NO_DISPLAY)) {
rdev->pm.requested_power_state_index++;
}
break;
case DYNPM_ACTION_UPCLOCK:
if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
rdev->pm.dynpm_can_upclock = false;
} else {
if (rdev->pm.active_crtc_count > 1) {
for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if (i <= rdev->pm.current_power_state_index) {
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index;
break;
} else {
rdev->pm.requested_power_state_index = i;
break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index + 1;
}
rdev->pm.requested_clock_mode_index = 0;
break;
case DYNPM_ACTION_DEFAULT:
rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_upclock = false;
break;
case DYNPM_ACTION_NONE:
default:
DRM_ERROR("Requested mode for not defined action\n");
return;
}
} else {
/* XXX select a power state based on AC/DC, single/dualhead, etc. */
/* for now just select the first power state and switch between clock modes */
/* power state array is low to high, default is first (0) */
if (rdev->pm.active_crtc_count > 1) {
rdev->pm.requested_power_state_index = -1;
/* start at 1 as we don't want the default mode */
for (i = 1; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
continue;
else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
(rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
rdev->pm.requested_power_state_index = i;
break;
}
}
/* if nothing selected, grab the default state. */
if (rdev->pm.requested_power_state_index == -1)
rdev->pm.requested_power_state_index = 0;
} else
rdev->pm.requested_power_state_index = 1;
 
switch (rdev->pm.dynpm_planned_action) {
case DYNPM_ACTION_MINIMUM:
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_downclock = false;
break;
case DYNPM_ACTION_DOWNCLOCK:
if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
if (rdev->pm.current_clock_mode_index == 0) {
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_downclock = false;
} else
rdev->pm.requested_clock_mode_index =
rdev->pm.current_clock_mode_index - 1;
} else {
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_downclock = false;
}
/* don't use the power state if crtcs are active and no display flag is set */
if ((rdev->pm.active_crtc_count > 0) &&
(rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].flags &
RADEON_PM_MODE_NO_DISPLAY)) {
rdev->pm.requested_clock_mode_index++;
}
break;
case DYNPM_ACTION_UPCLOCK:
if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
if (rdev->pm.current_clock_mode_index ==
(rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
rdev->pm.dynpm_can_upclock = false;
} else
rdev->pm.requested_clock_mode_index =
rdev->pm.current_clock_mode_index + 1;
} else {
rdev->pm.requested_clock_mode_index =
rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
rdev->pm.dynpm_can_upclock = false;
}
break;
case DYNPM_ACTION_DEFAULT:
rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.requested_clock_mode_index = 0;
rdev->pm.dynpm_can_upclock = false;
break;
case DYNPM_ACTION_NONE:
default:
DRM_ERROR("Requested mode for not defined action\n");
return;
}
}
 
DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
pcie_lanes);
}
 
void rs780_pm_init_profile(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states == 2) {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* mid sh */
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* mid mh */
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
} else if (rdev->pm.num_power_states == 3) {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* mid sh */
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* mid mh */
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
} else {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* mid sh */
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* mid mh */
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
}
 
void r600_pm_init_profile(struct radeon_device *rdev)
{
int idx;
 
if (rdev->family == CHIP_R600) {
/* XXX */
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* mid sh */
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* mid mh */
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
} else {
if (rdev->pm.num_power_states < 4) {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
/* low sh */
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* mid sh */
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
/* high sh */
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
/* low mh */
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
/* high mh */
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
} else {
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
/* low sh */
if (rdev->flags & RADEON_IS_MOBILITY)
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
else
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
/* mid sh */
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
/* high sh */
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
/* low mh */
if (rdev->flags & RADEON_IS_MOBILITY)
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
else
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
/* mid mh */
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
/* high mh */
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
}
}
}
 
void r600_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
int req_cm_idx = rdev->pm.requested_clock_mode_index;
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
 
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
if (voltage->voltage == 0xff01)
return;
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
}
}
}
 
bool r600_gui_idle(struct radeon_device *rdev)
{
if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
968,6 → 483,7
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
 
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
1103,27 → 619,20
 
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
unsigned long flags;
uint32_t r;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
r = RREG32(R_0028FC_MC_DATA);
WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
 
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
S_0028F8_MC_IND_WR_EN(1));
WREG32(R_0028FC_MC_DATA, v);
WREG32(R_0028F8_MC_INDEX, 0x7F);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
 
static void r600_mc_program(struct radeon_device *rdev)
1338,7 → 847,7
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
0, NULL, &rdev->vram_scratch.robj);
NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
1439,7 → 948,7
return true;
}
 
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
1644,67 → 1153,6
r600_print_gpu_status_regs(rdev);
}
 
static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
u32 tmp, i;
 
dev_info(rdev->dev, "GPU pci config reset\n");
 
/* disable dpm? */
 
/* Disable CP parsing/prefetching */
if (rdev->family >= CHIP_RV770)
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
else
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
 
/* disable the RLC */
WREG32(RLC_CNTL, 0);
 
/* Disable DMA */
tmp = RREG32(DMA_RB_CNTL);
tmp &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL, tmp);
 
mdelay(50);
 
/* set mclk/sclk to bypass */
if (rdev->family >= CHIP_RV770)
rv770_set_clk_bypass_mode(rdev);
/* disable BM */
pci_clear_master(rdev->pdev);
/* disable mem access */
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
 
/* BIF reset workaround. Not sure if this is needed on 6xx */
tmp = RREG32(BUS_CNTL);
tmp |= VGA_COHE_SPEC_TIMER_DIS;
WREG32(BUS_CNTL, tmp);
 
tmp = RREG32(BIF_SCRATCH0);
 
/* reset */
radeon_pci_config_reset(rdev);
mdelay(1);
 
/* BIF reset workaround. Not sure if this is needed on 6xx */
tmp = SOFT_RESET_BIF;
WREG32(SRBM_SOFT_RESET, tmp);
mdelay(1);
WREG32(SRBM_SOFT_RESET, 0);
 
/* wait for asic to come out of reset */
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
break;
udelay(1);
}
}
 
int r600_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;
1714,17 → 1162,10
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
 
/* try soft reset */
r600_gpu_soft_reset(rdev, reset_mask);
 
reset_mask = r600_gpu_check_soft_reset(rdev);
 
/* try pci config reset */
if (reset_mask && radeon_hard_reset)
r600_gpu_pci_config_reset(rdev);
 
reset_mask = r600_gpu_check_soft_reset(rdev);
 
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
 
1747,12 → 1188,36
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
radeon_ring_lockup_update(rdev, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
 
/**
* r600_dma_is_lockup - Check if the DMA engine is locked up
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 reset_mask = r600_gpu_check_soft_reset(rdev);
 
if (!(reset_mask & RADEON_RESET_DMA)) {
radeon_ring_lockup_update(ring);
return false;
}
/* force ring activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
 
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
1812,6 → 1277,7
{
u32 tiling_config;
u32 ramcfg;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
int i, j;
1938,20 → 1404,26
}
tiling_config |= BANK_SWAPS(1);
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
tmp = R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
if (tmp < rdev->config.r600.max_backends) {
rdev->config.r600.max_backends = tmp;
}
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
tmp = rdev->config.r600.max_simds -
tmp = R6XX_MAX_PIPES -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
if (tmp < rdev->config.r600.max_pipes) {
rdev->config.r600.max_pipes = tmp;
}
tmp = R6XX_MAX_SIMDS -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
rdev->config.r600.active_simds = tmp;
if (tmp < rdev->config.r600.max_simds) {
rdev->config.r600.max_simds = tmp;
}
 
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
tmp = 0;
for (i = 0; i < rdev->config.r600.max_backends; i++)
tmp |= (1 << i);
/* if all the backends are disabled, fix it up here */
if ((disabled_rb_mask & tmp) == tmp) {
for (i = 0; i < rdev->config.r600.max_backends; i++)
disabled_rb_mask &= ~(1 << i);
}
tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
R6XX_MAX_BACKENDS, disabled_rb_mask);
2216,27 → 1688,20
*/
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
r = RREG32(PCIE_PORT_DATA);
spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
return r;
}
 
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
WREG32(PCIE_PORT_DATA, (v));
(void)RREG32(PCIE_PORT_DATA);
spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
 
/*
2244,7 → 1709,6
*/
void r600_cp_stop(struct radeon_device *rdev)
{
if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
2253,15 → 1717,22
 
int r600_init_microcode(struct radeon_device *rdev)
{
struct platform_device *pdev;
const char *chip_name;
const char *rlc_chip_name;
const char *smc_chip_name = "RV770";
size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
size_t pfp_req_size, me_req_size, rlc_req_size;
char fw_name[30];
int err;
 
DRM_DEBUG("\n");
 
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
err = IS_ERR(pdev);
if (err) {
printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
return -EINVAL;
}
 
switch (rdev->family) {
case CHIP_R600:
chip_name = "R600";
2295,51 → 1766,32
case CHIP_RV770:
chip_name = "RV770";
rlc_chip_name = "R700";
smc_chip_name = "RV770";
smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
break;
case CHIP_RV730:
case CHIP_RV740:
chip_name = "RV730";
rlc_chip_name = "R700";
smc_chip_name = "RV730";
smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
break;
case CHIP_RV710:
chip_name = "RV710";
rlc_chip_name = "R700";
smc_chip_name = "RV710";
smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
break;
case CHIP_RV740:
chip_name = "RV730";
rlc_chip_name = "R700";
smc_chip_name = "RV740";
smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
break;
case CHIP_CEDAR:
chip_name = "CEDAR";
rlc_chip_name = "CEDAR";
smc_chip_name = "CEDAR";
smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
break;
case CHIP_REDWOOD:
chip_name = "REDWOOD";
rlc_chip_name = "REDWOOD";
smc_chip_name = "REDWOOD";
smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
break;
case CHIP_JUNIPER:
chip_name = "JUNIPER";
rlc_chip_name = "JUNIPER";
smc_chip_name = "JUNIPER";
smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
chip_name = "CYPRESS";
rlc_chip_name = "CYPRESS";
smc_chip_name = "CYPRESS";
smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
break;
case CHIP_PALM:
chip_name = "PALM";
2365,15 → 1817,15
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
} else {
pfp_req_size = R600_PFP_UCODE_SIZE * 4;
me_req_size = R600_PM4_UCODE_SIZE * 12;
rlc_req_size = R600_RLC_UCODE_SIZE * 4;
pfp_req_size = PFP_UCODE_SIZE * 4;
me_req_size = PM4_UCODE_SIZE * 12;
rlc_req_size = RLC_UCODE_SIZE * 4;
}
 
DRM_INFO("Loading %s Microcode\n", chip_name);
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
2385,7 → 1837,7
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
2396,7 → 1848,7
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
2406,25 → 1858,9
err = -EINVAL;
}
 
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
printk(KERN_ERR
"smc: error loading firmware \"%s\"\n",
fw_name);
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
err = -EINVAL;
}
}
out:
platform_device_unregister(pdev);
 
out:
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
2436,42 → 1872,10
rdev->me_fw = NULL;
release_firmware(rdev->rlc_fw);
rdev->rlc_fw = NULL;
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
}
return err;
}
 
u32 r600_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
u32 rptr;
 
if (rdev->wb.enabled)
rptr = rdev->wb.wb[ring->rptr_offs/4];
else
rptr = RREG32(R600_CP_RB_RPTR);
 
return rptr;
}
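/* Note: with writeback enabled the CP mirrors its read pointer into a
 * buffer in system memory (rdev->wb), so the driver can poll it without
 * paying for an MMIO register read; the RREG32() path is the fallback
 * when writeback is disabled. */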
 
u32 r600_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
u32 wptr;
 
wptr = RREG32(R600_CP_RB_WPTR);
 
return wptr;
}
 
void r600_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
WREG32(R600_CP_RB_WPTR, ring->wptr);
(void)RREG32(R600_CP_RB_WPTR);
}
 
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
2498,13 → 1902,13
 
fw_data = (const __be32 *)rdev->me_fw->data;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
WREG32(CP_ME_RAM_DATA,
be32_to_cpup(fw_data++));
 
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
for (i = 0; i < PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA,
be32_to_cpup(fw_data++));
 
2537,7 → 1941,7
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
 
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2558,8 → 1962,8
WREG32(GRBM_SOFT_RESET, 0);
 
/* Set ring buffer size */
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
2594,6 → 1998,8
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
ring->rptr = RREG32(CP_RB_RPTR);
 
r600_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
2601,10 → 2007,6
ring->ready = false;
return r;
}
 
if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
return 0;
}
 
2614,7 → 2016,7
int r;
 
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 8);
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
2637,6 → 2039,327
}
 
/*
* DMA
* Starting with R600, the GPU has an asynchronous
* DMA engine. The programming model is very similar
* to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
* It supports copying data, writing embedded data,
* solid fills, and a number of other things. It also
* has support for tiling/detiling of buffers.
*/
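/* For orientation: an async-DMA packet header on r6xx packs the opcode in
 * the top nibble and a dword count in the low 16 bits, with tiling and
 * semaphore-related bits in between - a sketch of the layout only; see the
 * DMA_PACKET() macro in r600d.h for the authoritative encoding:
 *
 *	header = ((cmd & 0xf) << 28) | (flag_bits) | (count & 0xffff);
 */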
/**
* r600_dma_stop - stop the async dma engine
*
* @rdev: radeon_device pointer
*
* Stop the async dma engine (r6xx-evergreen).
*/
void r600_dma_stop(struct radeon_device *rdev)
{
u32 rb_cntl = RREG32(DMA_RB_CNTL);
 
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
rb_cntl &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL, rb_cntl);
 
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}
 
/**
* r600_dma_resume - setup and start the async dma engine
*
* @rdev: radeon_device pointer
*
* Set up the DMA ring buffer and enable it. (r6xx-evergreen).
* Returns 0 for success, error for failure.
*/
int r600_dma_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
u32 rb_cntl, dma_cntl, ib_cntl;
u32 rb_bufsz;
int r;
 
/* Reset dma */
if (rdev->family >= CHIP_RV770)
WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
else
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
RREG32(SRBM_SOFT_RESET);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
 
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 
/* Set ring buffer size in dwords */
rb_bufsz = drm_order(ring->ring_size / 4);
rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
WREG32(DMA_RB_CNTL, rb_cntl);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(DMA_RB_RPTR, 0);
WREG32(DMA_RB_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(DMA_RB_RPTR_ADDR_HI,
upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
WREG32(DMA_RB_RPTR_ADDR_LO,
((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
 
if (rdev->wb.enabled)
rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
 
WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
 
/* enable DMA IBs */
ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
WREG32(DMA_IB_CNTL, ib_cntl);
 
dma_cntl = RREG32(DMA_CNTL);
dma_cntl &= ~CTXEMPTY_INT_ENABLE;
WREG32(DMA_CNTL, dma_cntl);
 
if (rdev->family >= CHIP_RV770)
WREG32(DMA_MODE, 1);
 
ring->wptr = 0;
WREG32(DMA_RB_WPTR, ring->wptr << 2);
 
ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
 
WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
 
ring->ready = true;
 
r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
if (r) {
ring->ready = false;
return r;
}
 
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
return 0;
}
 
/**
* r600_dma_fini - tear down the async dma engine
*
* @rdev: radeon_device pointer
*
* Stop the async dma engine and free the ring (r6xx-evergreen).
*/
void r600_dma_fini(struct radeon_device *rdev)
{
r600_dma_stop(rdev);
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}
 
/*
* UVD
*/
int r600_uvd_rbc_start(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
uint64_t rptr_addr;
uint32_t rb_bufsz, tmp;
int r;
 
rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
 
if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
return -EINVAL;
}
 
/* force RBC into idle state */
WREG32(UVD_RBC_RB_CNTL, 0x11010101);
 
/* Set the write pointer delay */
WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
 
/* set the wb address */
WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
 
/* program the 4GB memory segment for rptr and ring buffer */
WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
(0x7 << 16) | (0x1 << 31));
 
/* Initialize the ring buffer's read and write pointers */
WREG32(UVD_RBC_RB_RPTR, 0x0);
 
ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
WREG32(UVD_RBC_RB_WPTR, ring->wptr);
 
/* set the ring address */
WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
 
/* Set ring buffer size */
rb_bufsz = drm_order(ring->ring_size);
rb_bufsz = (0x1 << 8) | rb_bufsz;
WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
 
ring->ready = true;
r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
if (r) {
ring->ready = false;
return r;
}
 
r = radeon_ring_lock(rdev, ring, 10);
if (r) {
DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
return r;
}
 
tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
radeon_ring_write(ring, tmp);
radeon_ring_write(ring, 0xFFFFF);
 
tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
radeon_ring_write(ring, tmp);
radeon_ring_write(ring, 0xFFFFF);
 
tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
radeon_ring_write(ring, tmp);
radeon_ring_write(ring, 0xFFFFF);
 
/* Clear timeout status bits */
radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
radeon_ring_write(ring, 0x8);
 
radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
radeon_ring_write(ring, 3);
 
radeon_ring_unlock_commit(rdev, ring);
 
return 0;
}
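The 4 GB segment check at the top of r600_uvd_rbc_start() exists because UVD_LMI_EXT40_ADDR programs a single upper-address segment shared by the ring and the rptr write-back. A standalone illustration of that check (the addresses below are hypothetical, chosen only to exercise the comparison):

#include <stdio.h>
#include <stdint.h>

#define upper_32_bits(n) ((uint32_t)((n) >> 32))  /* as in the kernel */

int main(void)
{
    uint64_t ring_addr = 0x100004000ULL;  /* hypothetical GPU addresses */
    uint64_t rptr_addr = 0x100008000ULL;

    if (upper_32_bits(rptr_addr) != upper_32_bits(ring_addr))
        printf("UVD ring and rptr not in the same 4GB segment!\n");
    else
        printf("same segment, upper bits 0x%x\n", upper_32_bits(ring_addr));
    return 0;
}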
 
void r600_uvd_rbc_stop(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
 
/* force RBC into idle state */
WREG32(UVD_RBC_RB_CNTL, 0x11010101);
ring->ready = false;
}
 
int r600_uvd_init(struct radeon_device *rdev)
{
int i, j, r;
/* disable byte swapping */
u32 lmi_swap_cntl = 0;
u32 mp_swap_cntl = 0;
 
/* raise clocks while booting up the VCPU */
radeon_set_uvd_clocks(rdev, 53300, 40000);
 
/* disable clock gating */
WREG32(UVD_CGC_GATE, 0);
 
/* disable interrupt */
WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
 
/* put LMI, VCPU, RBC etc... into reset */
WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
mdelay(5);
 
/* take UVD block out of reset */
WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
mdelay(5);
 
/* initialize UVD memory controller */
WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
(1 << 21) | (1 << 9) | (1 << 20));
 
#ifdef __BIG_ENDIAN
/* swap (8 in 32) RB and IB */
lmi_swap_cntl = 0xa;
mp_swap_cntl = 0;
#endif
WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
 
WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
WREG32(UVD_MPC_SET_MUXA1, 0x0);
WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
WREG32(UVD_MPC_SET_MUXB1, 0x0);
WREG32(UVD_MPC_SET_ALU, 0);
WREG32(UVD_MPC_SET_MUX, 0x88);
 
/* Stall UMC */
WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
 
/* take all subblocks out of reset, except VCPU */
WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
mdelay(5);
 
/* enable VCPU clock */
WREG32(UVD_VCPU_CNTL, 1 << 9);
 
/* enable UMC */
WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
 
/* boot up the VCPU */
WREG32(UVD_SOFT_RESET, 0);
mdelay(10);
 
WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
 
for (i = 0; i < 10; ++i) {
uint32_t status;
for (j = 0; j < 100; ++j) {
status = RREG32(UVD_STATUS);
if (status & 2)
break;
mdelay(10);
}
r = 0;
if (status & 2)
break;
 
DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
mdelay(10);
WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
mdelay(10);
r = -1;
}
 
if (r) {
DRM_ERROR("UVD not responding, giving up!!!\n");
radeon_set_uvd_clocks(rdev, 0, 0);
return r;
}
 
/* enable interrupt */
WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
 
r = r600_uvd_rbc_start(rdev);
if (!r)
DRM_INFO("UVD initialized successfully.\n");
 
/* lower clocks again */
radeon_set_uvd_clocks(rdev, 0, 0);
 
return r;
}
 
/*
* GPU scratch registers helpers function.
*/
void r600_scratch_init(struct radeon_device *rdev)
2673,7 → 2396,7
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
2691,6 → 2414,94
return r;
}
 
/**
* r600_dma_ring_test - simple async dma engine test
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Test the DMA engine by using it to write a
* value to memory (r6xx-SI).
* Returns 0 for success, error for failure.
*/
int r600_dma_ring_test(struct radeon_device *rdev,
struct radeon_ring *ring)
{
unsigned i;
int r;
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp;
 
if (!ptr) {
DRM_ERROR("invalid vram scratch pointer\n");
return -EINVAL;
}
 
tmp = 0xCAFEDEAD;
writel(tmp, ptr);
 
r = radeon_ring_lock(rdev, ring, 4);
if (r) {
DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
return r;
}
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
 
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = readl(ptr);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
}
 
if (i < rdev->usec_timeout) {
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
ring->idx, tmp);
r = -EINVAL;
}
return r;
}
 
int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t tmp = 0;
unsigned i;
int r;
 
WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
r = radeon_ring_lock(rdev, ring, 3);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
ring->idx, r);
return r;
}
radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(UVD_CONTEXT_ID);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
}
 
if (i < rdev->usec_timeout) {
DRM_INFO("ring test on %d succeeded in %d usecs\n",
ring->idx, i);
} else {
DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
ring->idx, tmp);
r = -EINVAL;
}
return r;
}
 
/*
* CP fences/semaphores
*/
2699,17 → 2510,14
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA;
 
if (rdev->family >= CHIP_RV770)
cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
 
if (rdev->wb.use_event) {
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
2716,7 → 2524,7
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
2723,7 → 2531,9
} else {
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
2743,44 → 2553,137
}
}
 
void r600_uvd_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr;
 
radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 0);
 
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 2);
return;
}
 
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
if (rdev->family < CHIP_CAYMAN)
sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
 
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
 
/*
* DMA fences/semaphores
*/
 
/**
* r600_semaphore_ring_emit - emit a semaphore on the CP ring
* r600_dma_fence_ring_emit - emit a fence on the DMA ring
*
* @rdev: radeon_device pointer
* @ring: radeon ring buffer object
* @fence: radeon fence object
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
* an interrupt if needed (r6xx-r7xx).
*/
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 
/* write the fence */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
radeon_ring_write(ring, lower_32_bits(fence->seq));
/* generate an interrupt */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}
 
/**
* r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @semaphore: radeon semaphore object
* @emit_wait: Is this a semaphore wait?
* @emit_wait: wait or signal semaphore
*
* Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
* from running ahead of semaphore waits.
* Add a DMA semaphore packet to the ring to wait on or signal
* other rings (r6xx-SI).
*/
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
u64 addr = semaphore->gpu_addr;
u32 s = emit_wait ? 0 : 1;
 
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
}
 
void r600_uvd_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
if (rdev->family < CHIP_CAYMAN)
sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
 
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
 
/* PFP_SYNC_ME packet only exists on 7xx+ */
if (emit_wait && (rdev->family >= CHIP_RV770)) {
/* Prevent the PFP from running ahead of the semaphore wait */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
}
 
return true;
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
{
struct radeon_semaphore *sem = NULL;
struct radeon_sa_bo *vb = NULL;
int r;
 
r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
if (r) {
return r;
}
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
r600_blit_done_copy(rdev, fence, vb, sem);
return 0;
}
 
/**
* r600_copy_cpdma - copy pages using the CP DMA engine
* r600_copy_dma - copy pages using the DMA engine
*
* @rdev: radeon_device pointer
* @src_offset: src GPU address
2788,19 → 2691,19
* @num_gpu_pages: number of GPU pages to xfer
* @fence: radeon fence object
*
* Copy GPU paging using the CP DMA engine (r6xx+).
* Copy GPU paging using the DMA engine (r6xx).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
int r600_copy_cpdma(struct radeon_device *rdev,
int r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
{
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.blit_ring_index;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, tmp;
u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
 
2810,9 → 2713,9
return r;
}
 
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
2819,41 → 2722,35
return r;
}
 
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, WAIT_3D_IDLE_bit);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
if (cur_size_in_bytes > 0x1fffff)
cur_size_in_bytes = 0x1fffff;
size_in_bytes -= cur_size_in_bytes;
tmp = upper_32_bits(src_offset) & 0xff;
if (size_in_bytes == 0)
tmp |= PACKET3_CP_DMA_CP_SYNC;
radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
radeon_ring_write(ring, lower_32_bits(src_offset));
radeon_ring_write(ring, tmp);
radeon_ring_write(ring, lower_32_bits(dst_offset));
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
radeon_ring_write(ring, cur_size_in_bytes);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
cur_size_in_dw = size_in_dw;
if (cur_size_in_dw > 0xFFFE)
cur_size_in_dw = 0xFFFE;
size_in_dw -= cur_size_in_dw;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
radeon_ring_write(ring, src_offset & 0xfffffffc);
radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
(upper_32_bits(src_offset) & 0xff)));
src_offset += cur_size_in_dw * 4;
dst_offset += cur_size_in_dw * 4;
}
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
 
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
radeon_semaphore_free(rdev, &sem, *fence);
 
return r;
2880,13 → 2777,19
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
 
/* scratch needs to be initialized before MC */
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
r600_mc_program(rdev);
 
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
2895,6 → 2798,12
return r;
}
r600_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
// r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
2907,6 → 2816,12
return r;
}
 
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
if (r) {
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
return r;
}
 
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
2924,10 → 2839,18
 
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_PACKET2);
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
 
r = r600_cp_load_microcode(rdev);
if (r)
return r;
2935,13 → 2858,15
if (r)
return r;
 
r = r600_dma_resume(rdev);
if (r)
return r;
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
 
return 0;
}
 
3021,20 → 2946,12
if (r)
return r;
 
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
 
3086,6 → 3003,16
radeon_ring_write(ring, ib->length_dw);
}
 
void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
 
radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
radeon_ring_write(ring, ib->gpu_addr);
radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
radeon_ring_write(ring, ib->length_dw);
}
 
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib ib;
3109,7 → 3036,7
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
r = radeon_ib_schedule(rdev, &ib, NULL, false);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
3139,6 → 3066,139
return r;
}
 
/**
* r600_dma_ib_test - test an IB on the DMA engine
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Test a simple IB in the DMA ring (r6xx-SI).
* Returns 0 on success, error on failure.
*/
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib ib;
unsigned i;
int r;
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp = 0;
 
if (!ptr) {
DRM_ERROR("invalid vram scratch pointer\n");
return -EINVAL;
}
 
tmp = 0xCAFEDEAD;
writel(tmp, ptr);
 
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
}
 
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
ib.ptr[3] = 0xDEADBEEF;
ib.length_dw = 4;
 
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
}
r = radeon_fence_wait(ib.fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = readl(ptr);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
r = -EINVAL;
}
radeon_ib_free(rdev, &ib);
return r;
}
 
int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_fence *fence = NULL;
int r;
 
r = radeon_set_uvd_clocks(rdev, 53300, 40000);
if (r) {
DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
return r;
}
 
// r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
if (r) {
DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
goto error;
}
 
// r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
if (r) {
DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
goto error;
}
 
r = radeon_fence_wait(fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
goto error;
}
DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
radeon_fence_unref(&fence);
radeon_set_uvd_clocks(rdev, 0, 0);
return r;
}
 
/**
* r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
*
* @rdev: radeon_device pointer
* @ib: IB object to schedule
*
* Schedule an IB in the DMA ring (r6xx-r7xx).
*/
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
 
if (rdev->wb.enabled) {
u32 next_rptr = ring->wptr + 4;
while ((next_rptr & 7) != 5)
next_rptr++;
next_rptr += 3;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
radeon_ring_write(ring, next_rptr);
}
 
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
* Pad as necessary with NOPs.
*/
while ((ring->wptr & 7) != 5)
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 
}
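A note on the padding arithmetic above: the INDIRECT_BUFFER packet is 3 dwords, so it must start at an offset of 5 mod 8 for its last dword to land on an 8-dword boundary; hence the (wptr & 7) != 5 loop. A standalone check of that arithmetic (illustration only, not driver code):

#include <stdio.h>

static unsigned pad_to_ib_start(unsigned wptr)
{
    while ((wptr & 7) != 5)  /* same condition as the driver's NOP loop */
        wptr++;              /* each NOP consumes one dword */
    return wptr;
}

int main(void)
{
    unsigned w;
    for (w = 0; w < 8; w++)
        printf("wptr %u -> IB packet at %u, last dword at %u\n",
               w, pad_to_ib_start(w), pad_to_ib_start(w) + 2);
    return 0;
}

For every starting offset the last dword lands at 7 mod 8, i.e. the packet ends exactly on an 8-dword boundary.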
 
/*
* Interrupts
*
3155,7 → 3215,7
u32 rb_bufsz;
 
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 4);
rb_bufsz = drm_order(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size;
rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
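As a worked example of the sizing math above: r600_init() requests a 64 KiB IH ring, so ring_size / 4 = 16384 dwords, order_base_2() yields 14, and the ring rounds to (1 << 14) * 4 = 64 KiB with ptr_mask = 0xffff. A standalone check (illustration only; the helper is a stand-in for the kernel's order_base_2() on power-of-two inputs):

#include <stdio.h>

static unsigned order_base_2(unsigned n)  /* stand-in, power-of-two inputs */
{
    unsigned order = 0;
    while ((1u << order) < n)
        order++;
    return order;
}

int main(void)
{
    unsigned ring_size = 64 * 1024;              /* as in r600_ih_ring_init */
    unsigned rb_bufsz = order_base_2(ring_size / 4);
    unsigned rounded = (1u << rb_bufsz) * 4;

    printf("rb_bufsz=%u size=%u ptr_mask=0x%x\n",
           rb_bufsz, rounded, rounded - 1);      /* 14, 65536, 0xffff */
    return 0;
}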
3170,7 → 3230,7
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0,
RADEON_GEM_DOMAIN_GTT,
NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
3235,7 → 3295,7
WREG32(RLC_CNTL, RLC_ENABLE);
}
 
static int r600_rlc_resume(struct radeon_device *rdev)
static int r600_rlc_init(struct radeon_device *rdev)
{
u32 i;
const __be32 *fw_data;
3247,22 → 3307,45
 
WREG32(RLC_HB_CNTL, 0);
 
if (rdev->family == CHIP_ARUBA) {
WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
}
if (rdev->family <= CHIP_CAYMAN) {
WREG32(RLC_HB_BASE, 0);
WREG32(RLC_HB_RPTR, 0);
WREG32(RLC_HB_WPTR, 0);
}
if (rdev->family <= CHIP_CAICOS) {
WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
}
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
 
fw_data = (const __be32 *)rdev->rlc_fw->data;
if (rdev->family >= CHIP_RV770) {
if (rdev->family >= CHIP_ARUBA) {
for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else if (rdev->family >= CHIP_CAYMAN) {
for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else if (rdev->family >= CHIP_CEDAR) {
for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else {
for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
for (i = 0; i < RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
3370,10 → 3453,7
r600_disable_interrupts(rdev);
 
/* init rlc */
if (rdev->family >= CHIP_CEDAR)
ret = evergreen_rlc_resume(rdev);
else
ret = r600_rlc_resume(rdev);
ret = r600_rlc_init(rdev);
if (ret) {
r600_ih_ring_fini(rdev);
return ret;
3392,7 → 3472,7
WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
rb_bufsz = drm_order(rdev->ih.ring_size / 4);
 
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
3439,8 → 3519,8
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
u32 d1grph = 0, d2grph = 0;
u32 dma_cntl;
u32 thermal_int = 0;
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
3475,21 → 3555,8
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
 
dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
 
if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
thermal_int = RREG32(CG_THERMAL_INT) &
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
} else if (rdev->family >= CHIP_RV770) {
thermal_int = RREG32(RV770_CG_THERMAL_INT) &
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
}
if (rdev->irq.dpm_thermal) {
DRM_DEBUG("dpm thermal\n");
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
}
 
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
3547,8 → 3614,8
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DMA_CNTL, dma_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DC_HPD1_INT_CONTROL, hpd1);
3571,11 → 3638,6
WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
WREG32(CG_THERMAL_INT, thermal_int);
} else if (rdev->family >= CHIP_RV770) {
WREG32(RV770_CG_THERMAL_INT, thermal_int);
}
 
return 0;
}
3725,7 → 3787,6
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
3770,7 → 3831,6
u32 ring_index;
bool queue_hotplug = false;
bool queue_hdmi = false;
bool queue_thermal = false;
 
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
3924,10 → 3984,6
break;
}
break;
case 124: /* UVD */
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
3942,16 → 3998,6
DRM_DEBUG("IH: DMA trap\n");
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
break;
case 230: /* thermal low to high */
DRM_DEBUG("IH: thermal low to high\n");
rdev->pm.dpm.thermal.high_to_low = false;
queue_thermal = true;
break;
case 231: /* thermal high to low */
DRM_DEBUG("IH: thermal high to low\n");
rdev->pm.dpm.thermal.high_to_low = true;
queue_thermal = true;
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
4007,15 → 4053,16
}
 
/**
* r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
* r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
* rdev: radeon device structure
* bo: buffer object struct which userspace is waiting for idle
*
* Some R6XX/R7XX don't seem to take into account HDP flushes performed
* through the ring buffer. This leads to corruption in rendering, see
* http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
* directly perform the HDP flush by writing the register through MMIO.
* Some R6XX/R7XX doesn't seems to take into account HDP flush performed
* through ring buffer, this leads to corruption in rendering, see
* http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
* directly perform HDP flush by writing register through MMIO.
*/
void r600_mmio_hdp_flush(struct radeon_device *rdev)
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
/drivers/video/drm/radeon/r600_audio.c
57,12 → 57,12
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
}
 
struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
struct r600_audio r600_audio_status(struct radeon_device *rdev)
{
struct r600_audio_pin status;
struct r600_audio status;
uint32_t value;
 
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
120,16 → 120,16
struct radeon_device *rdev = container_of(work, struct radeon_device,
audio_work);
struct drm_device *dev = rdev->ddev;
struct r600_audio_pin audio_status = r600_audio_status(rdev);
struct r600_audio audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
bool changed = false;
 
if (rdev->audio.pin[0].channels != audio_status.channels ||
rdev->audio.pin[0].rate != audio_status.rate ||
rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
rdev->audio.pin[0].status_bits != audio_status.status_bits ||
rdev->audio.pin[0].category_code != audio_status.category_code) {
rdev->audio.pin[0] = audio_status;
if (rdev->audio_status.channels != audio_status.channels ||
rdev->audio_status.rate != audio_status.rate ||
rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
rdev->audio_status.status_bits != audio_status.status_bits ||
rdev->audio_status.category_code != audio_status.category_code) {
rdev->audio_status = audio_status;
changed = true;
}
 
141,16 → 141,13
}
}
 
/* enable the audio stream */
void r600_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable)
/*
* turn on/off audio engine
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
u32 value = 0;
 
if (!pin)
return;
 
DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
161,6 → 158,7
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
rdev->audio_enabled = enable;
}
 
/*
171,17 → 169,13
if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return 0;
 
rdev->audio.enabled = true;
r600_audio_engine_enable(rdev, true);
 
rdev->audio.num_pins = 1;
rdev->audio.pin[0].channels = -1;
rdev->audio.pin[0].rate = -1;
rdev->audio.pin[0].bits_per_sample = -1;
rdev->audio.pin[0].status_bits = 0;
rdev->audio.pin[0].category_code = 0;
rdev->audio.pin[0].id = 0;
/* disable audio. it will be set up later */
r600_audio_enable(rdev, &rdev->audio.pin[0], false);
rdev->audio_status.channels = -1;
rdev->audio_status.rate = -1;
rdev->audio_status.bits_per_sample = -1;
rdev->audio_status.status_bits = 0;
rdev->audio_status.category_code = 0;
 
return 0;
}
192,16 → 186,8
*/
void r600_audio_fini(struct radeon_device *rdev)
{
if (!rdev->audio.enabled)
if (!rdev->audio_enabled)
return;
 
r600_audio_enable(rdev, &rdev->audio.pin[0], false);
 
rdev->audio.enabled = false;
r600_audio_engine_enable(rdev, false);
}
 
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
{
/* only one pin on 6xx-NI */
return &rdev->audio.pin[0];
}
/drivers/video/drm/radeon/r600_hdmi.c
57,57 → 57,28
static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
{ 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25,20/1.001 MHz */
{ 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
{ 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
{ 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
{ 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
{ 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
{ 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
{ 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
{ 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
{ 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
{ 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
{ 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
{ 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
{ 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
};
 
 
/*
* calculate CTS and N values if they are not found in the table
* calculate CTS value if it's not found in the table
*/
static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
{
int n, cts;
unsigned long div, mul;
 
/* Safe, but overly large values */
n = 128 * freq;
cts = clock * 1000;
 
/* Smallest valid fraction */
div = gcd(n, cts);
 
n /= div;
cts /= div;
 
/*
* The optimal N is 128*freq/1000. Calculate the closest larger
* value that doesn't truncate any bits.
*/
mul = ((128*freq/1000) + (n-1))/n;
 
n *= mul;
cts *= mul;
 
/* Check that we are in spec (not always possible) */
if (n < (128*freq/1500))
printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
if (n > (128*freq/300))
printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
 
*N = n;
*CTS = cts;
 
DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
*N, *CTS, freq);
if (*CTS == 0)
*CTS = clock * N / (128 * freq) * 1000;
DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
N, *CTS, freq);
}
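To make the relationship concrete: both the predefined table and the calculated fallback satisfy CTS / N = f_pixel / (128 * f_audio). A standalone check of the 25.20 MHz / 48 kHz entry (illustration only; the expected values come from the table above):

#include <stdio.h>

int main(void)
{
    unsigned long long clock_hz = 25200000ULL; /* 25.20 MHz pixel clock */
    unsigned long long freq = 48000ULL;        /* audio sample rate in Hz */
    unsigned long long n = 128 * freq / 1000;  /* optimal N = 6144 */
    unsigned long long cts = clock_hz * n / (128 * freq);

    /* matches the predefined table entry: N=6144, CTS=25200 */
    printf("N=%llu CTS=%llu\n", n, cts);
    return 0;
}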
 
struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
115,16 → 86,15
struct radeon_hdmi_acr res;
u8 i;
 
/* Precalculated values for common clocks */
for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
if (r600_hdmi_predefined_acr[i].clock == clock)
return r600_hdmi_predefined_acr[i];
}
for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
r600_hdmi_predefined_acr[i].clock != 0; i++)
;
res = r600_hdmi_predefined_acr[i];
 
/* And odd clocks get manually calculated */
r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
/* In case some CTS are missing */
r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
 
return res;
}
132,7 → 102,7
/*
* update the N and CTS parameters for a given pixel clock rate
*/
void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
141,33 → 111,21
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
 
WREG32_P(HDMI0_ACR_32_0 + offset,
HDMI0_ACR_CTS_32(acr.cts_32khz),
~HDMI0_ACR_CTS_32_MASK);
WREG32_P(HDMI0_ACR_32_1 + offset,
HDMI0_ACR_N_32(acr.n_32khz),
~HDMI0_ACR_N_32_MASK);
WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
 
WREG32_P(HDMI0_ACR_44_0 + offset,
HDMI0_ACR_CTS_44(acr.cts_44_1khz),
~HDMI0_ACR_CTS_44_MASK);
WREG32_P(HDMI0_ACR_44_1 + offset,
HDMI0_ACR_N_44(acr.n_44_1khz),
~HDMI0_ACR_N_44_MASK);
WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
 
WREG32_P(HDMI0_ACR_48_0 + offset,
HDMI0_ACR_CTS_48(acr.cts_48khz),
~HDMI0_ACR_CTS_48_MASK);
WREG32_P(HDMI0_ACR_48_1 + offset,
HDMI0_ACR_N_48(acr.n_48khz),
~HDMI0_ACR_N_48_MASK);
WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
}
 
/*
* build a HDMI Video Info Frame
*/
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
size_t size)
static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
175,8 → 133,15
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint8_t *frame = buffer + 3;
uint8_t *header = buffer;
 
/* Our header values (type, version, length) should be alright; Intel
* uses the same ones. The checksum function also seems to be OK, since it
* works fine for the audio infoframe. However, the calculated value is
* always lower by 2 than what fglrx produces, which breaks display on
* TVs that strictly check the checksum. Hack it manually here to work
* around this issue. */
frame[0x0] += 2;
 
WREG32(HDMI0_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
WREG32(HDMI0_AVI_INFO1 + offset,
184,7 → 149,7
WREG32(HDMI0_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(HDMI0_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
frame[0xC] | (frame[0xD] << 8));
}
 
/*
242,7 → 207,7
/*
* write the audio workaround status to the hardware
*/
void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
268,29 → 233,10
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 base_rate = 24000;
u32 max_ratio = clock / base_rate;
u32 dto_phase;
u32 dto_modulo = clock;
u32 wallclock_ratio;
u32 dto_cntl;
 
if (!dig || !dig->afmt)
return;
 
if (max_ratio >= 8) {
dto_phase = 192 * 1000;
wallclock_ratio = 3;
} else if (max_ratio >= 4) {
dto_phase = 96 * 1000;
wallclock_ratio = 2;
} else if (max_ratio >= 2) {
dto_phase = 48 * 1000;
wallclock_ratio = 1;
} else {
dto_phase = 24 * 1000;
wallclock_ratio = 0;
}
 
/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
* doesn't matter which one you use. Just use the first one.
*/
299,37 → 245,19
* number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
*/
if (ASIC_IS_DCE32(rdev)) {
if (dig->dig_encoder == 0) {
dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
} else {
dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
}
} else {
if (ASIC_IS_DCE3(rdev)) {
/* according to the reg specs, this should DCE3.2 only, but in
* practice it seems to cover DCE2.0/3.0/3.1 as well.
* practice it seems to cover DCE3.0 as well.
*/
if (dig->dig_encoder == 0) {
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
} else {
WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
AUDIO_DTO_MODULE(clock / 10));
}
}
}
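In both register layouts above the programmed fraction reduces to base_rate / clock, so (assuming the DTO scales its source clock by PHASE / MODULE, as the ratio comment suggests) the audio reference comes out at a fixed 24 MHz regardless of pixel clock. A standalone check (illustration only):

#include <stdio.h>

int main(void)
{
    unsigned base_rate = 24000;   /* kHz, as in the driver */
    unsigned clock = 148500;      /* kHz, e.g. a 1080p60 pixel clock */

    /* DCE3 path: PHASE = base_rate*100, MODULE = clock*100 */
    double dto = (double)clock * (base_rate * 100.0) / (clock * 100.0);
    printf("audio reference: %.0f kHz\n", dto);   /* always 24000 */
    return 0;
}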
 
/*
* update the info frames with the data from the current display mode
343,61 → 271,56
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
struct hdmi_avi_infoframe frame;
uint32_t offset;
uint32_t acr_ctl;
ssize_t err;
 
if (!dig || !dig->afmt)
return;
 
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
offset = dig->afmt->offset;
 
/* disable audio prior to setting up hw */
dig->afmt->pin = r600_audio_get_pin(rdev);
r600_audio_enable(rdev, dig->afmt->pin, false);
// r600_audio_set_clock(encoder, mode->clock);
 
r600_audio_set_dto(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
HDMI0_NULL_SEND); /* send null packets when required */
 
WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
 
if (ASIC_IS_DCE32(rdev)) {
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
} else {
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
HDMI0_60958_CS_UPDATE, /* allow 60958 channel status fields to be updated */
~(HDMI0_AUDIO_SAMPLE_SEND |
HDMI0_AUDIO_DELAY_EN_MASK |
HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
HDMI0_60958_CS_UPDATE));
HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
}
 
/* DCE 3.0 uses register that's normally for CRC_CONTROL */
acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL :
HDMI0_ACR_PACKET_CONTROL;
WREG32_P(acr_ctl + offset,
HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
HDMI0_ACR_AUTO_SEND, /* allow hw to send ACR packets when required */
~(HDMI0_ACR_SOURCE |
HDMI0_ACR_AUTO_SEND));
WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
HDMI0_ACR_SOURCE); /* select SW CTS value */
 
WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset,
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
HDMI0_NULL_SEND | /* send null packets when required */
HDMI0_GC_SEND | /* send general control packets */
HDMI0_GC_CONT); /* send general control packets every frame */
 
WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
/* TODO: HDMI0_AUDIO_INFO_UPDATE */
WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
 
WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset,
WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */
~(HDMI0_AVI_INFO_LINE_MASK |
HDMI0_AUDIO_INFO_LINE_MASK));
HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
 
WREG32_AND(HDMI0_GC + offset,
~HDMI0_GC_AVMUTE); /* unset HDMI0_GC_AVMUTE */
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
 
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
412,29 → 335,8
}
 
r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
 
/* fglrx duplicates INFOFRAME_CONTROL0 & INFOFRAME_CONTROL1 ops here */
 
WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset,
~(HDMI0_GENERIC0_SEND |
HDMI0_GENERIC0_CONT |
HDMI0_GENERIC0_UPDATE |
HDMI0_GENERIC1_SEND |
HDMI0_GENERIC1_CONT |
HDMI0_GENERIC0_LINE_MASK |
HDMI0_GENERIC1_LINE_MASK));
 
r600_hdmi_update_ACR(encoder, mode->clock);
 
WREG32_P(HDMI0_60958_0 + offset,
HDMI0_60958_CS_CHANNEL_NUMBER_L(1),
~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK |
HDMI0_60958_CS_CLOCK_ACCURACY_MASK));
 
WREG32_P(HDMI0_60958_1 + offset,
HDMI0_60958_CS_CHANNEL_NUMBER_R(2),
~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK);
 
/* it's unknown what these bits do exactly, but they're quite useful for debugging */
WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
441,16 → 343,12
WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
 
/* enable audio after to setting up hw */
r600_audio_enable(rdev, dig->afmt->pin, true);
r600_hdmi_audio_workaround(encoder);
}
 
/**
* r600_hdmi_update_audio_settings - Update audio infoframe
*
* @encoder: drm encoder
*
* Gets info about current audio stream and updates audio infoframe.
#if 0
/*
* update settings with current parameters from audio engine
*/
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
458,11 → 356,11
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct r600_audio_pin audio = r600_audio_status(rdev);
struct r600_audio audio = r600_audio_status(rdev);
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
struct hdmi_audio_infoframe frame;
uint32_t offset;
uint32_t value;
uint32_t iec;
ssize_t err;
 
if (!dig->afmt || !dig->afmt->enabled)
475,6 → 373,60
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
(int)audio.status_bits, (int)audio.category_code);
 
iec = 0;
if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
iec |= 1 << 0;
if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
iec |= 1 << 1;
if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
iec |= 1 << 2;
if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
iec |= 1 << 3;
 
iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
 
switch (audio.rate) {
case 32000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
break;
case 44100:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
break;
case 48000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
break;
case 88200:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
break;
case 96000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
break;
case 176400:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
break;
case 192000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
break;
}
 
WREG32(HDMI0_60958_0 + offset, iec);
 
iec = 0;
switch (audio.bits_per_sample) {
case 16:
iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
break;
case 20:
iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
break;
case 24:
iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
break;
}
if (audio.status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
 
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
DRM_ERROR("failed to setup audio infoframe\n");
489,23 → 441,10
return;
}
 
value = RREG32(HDMI0_AUDIO_PACKET_CONTROL + offset);
if (value & HDMI0_AUDIO_TEST_EN)
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
value & ~HDMI0_AUDIO_TEST_EN);
 
WREG32_OR(HDMI0_CONTROL + offset,
HDMI0_ERROR_ACK);
 
WREG32_AND(HDMI0_INFOFRAME_CONTROL0 + offset,
~HDMI0_AUDIO_INFO_SOURCE);
 
r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
 
WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
HDMI0_AUDIO_INFO_CONT |
HDMI0_AUDIO_INFO_UPDATE);
r600_hdmi_audio_workaround(encoder);
}
#endif
 
/*
* enable the HDMI engine
518,9 → 457,6
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 hdmi = HDMI0_ERROR_ACK;
 
if (!dig || !dig->afmt)
return;
 
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
/drivers/video/drm/radeon/r600d.h
302,26 → 302,11
#define GRBM_SOFT_RESET 0x8020
#define SOFT_RESET_CP (1<<0)
 
#define CG_THERMAL_CTRL 0x7F0
#define DIG_THERM_DPM(x) ((x) << 12)
#define DIG_THERM_DPM_MASK 0x000FF000
#define DIG_THERM_DPM_SHIFT 12
#define CG_THERMAL_STATUS 0x7F4
#define ASIC_T(x) ((x) << 0)
#define ASIC_T_MASK 0x1FF
#define ASIC_T_SHIFT 0
#define CG_THERMAL_INT 0x7F8
#define DIG_THERM_INTH(x) ((x) << 8)
#define DIG_THERM_INTH_MASK 0x0000FF00
#define DIG_THERM_INTH_SHIFT 8
#define DIG_THERM_INTL(x) ((x) << 16)
#define DIG_THERM_INTL_MASK 0x00FF0000
#define DIG_THERM_INTL_SHIFT 16
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
 
#define RV770_CG_THERMAL_INT 0x734
 
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
602,7 → 587,6
#define L2_BUSY (1 << 0)
 
#define WAIT_UNTIL 0x8040
#define WAIT_CP_DMA_IDLE_bit (1 << 8)
#define WAIT_2D_IDLE_bit (1 << 14)
#define WAIT_3D_IDLE_bit (1 << 15)
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
700,19 → 684,16
#define RLC_UCODE_ADDR 0x3f2c
#define RLC_UCODE_DATA 0x3f30
 
/* new for TN */
#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
 
#define SRBM_SOFT_RESET 0xe60
# define SOFT_RESET_BIF (1 << 1)
# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
# define SOFT_RESET_UVD (1 << 18)
# define RV770_SOFT_RESET_DMA (1 << 20)
 
#define BIF_SCRATCH0 0x5438
 
#define BUS_CNTL 0x5420
# define BIOS_ROM_DIS (1 << 1)
# define VGA_COHE_SPEC_TIMER_DIS (1 << 9)
 
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
940,9 → 921,6
#define DCCG_AUDIO_DTO0_LOAD 0x051c
# define DTO_LOAD (1 << 31)
#define DCCG_AUDIO_DTO0_CNTL 0x0520
# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
 
#define DCCG_AUDIO_DTO1_PHASE 0x0524
#define DCCG_AUDIO_DTO1_MODULE 0x0528
967,42 → 945,6
# define DIG_MODE_SDVO 4
#define DIG1_CNTL 0x79a0
 
#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x71bc
#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
#define SPEAKER_ALLOCATION_SHIFT 0
#define HDMI_CONNECTION (1 << 16)
#define DP_CONNECTION (1 << 17)
 
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
/* max channels minus one. 7 = 8 channels */
# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
* bit0 = 32 kHz
* bit1 = 44.1 kHz
* bit2 = 48 kHz
* bit3 = 88.2 kHz
* bit4 = 96 kHz
* bit5 = 176.4 kHz
* bit6 = 192 kHz
*/
 
/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
* instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
* different due to the new DIG blocks, but also have 2 instances.
1029,11 → 971,9
#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
# define HDMI0_AUDIO_DELAY_EN_MASK (3 << 4)
# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
# define HDMI0_AUDIO_TEST_EN (1 << 12)
# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
# define HDMI0_AUDIO_PACKETS_PER_LINE_MASK (0x1f << 16)
# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
# define HDMI0_60958_CS_UPDATE (1 << 26)
# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
1040,7 → 980,6
# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
#define HDMI0_AUDIO_CRC_CONTROL 0x740c
# define HDMI0_AUDIO_CRC_EN (1 << 0)
#define DCE3_HDMI0_ACR_PACKET_CONTROL 0x740c
#define HDMI0_VBI_PACKET_CONTROL 0x7410
# define HDMI0_NULL_SEND (1 << 0)
# define HDMI0_GC_SEND (1 << 4)
1050,7 → 989,7
# define HDMI0_AVI_INFO_CONT (1 << 1)
# define HDMI0_AUDIO_INFO_SEND (1 << 4)
# define HDMI0_AUDIO_INFO_CONT (1 << 5)
# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
# define HDMI0_MPEG_INFO_SEND (1 << 8)
# define HDMI0_MPEG_INFO_CONT (1 << 9)
1057,9 → 996,7
# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
#define HDMI0_INFOFRAME_CONTROL1 0x7418
# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
# define HDMI0_AVI_INFO_LINE_MASK (0x3f << 0)
# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI0_AUDIO_INFO_LINE_MASK (0x3f << 8)
# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
# define HDMI0_GENERIC0_SEND (1 << 0)
1068,9 → 1005,7
# define HDMI0_GENERIC1_SEND (1 << 4)
# define HDMI0_GENERIC1_CONT (1 << 5)
# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
# define HDMI0_GENERIC0_LINE_MASK (0x3f << 16)
# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
# define HDMI0_GENERIC1_LINE_MASK (0x3f << 24)
#define HDMI0_GC 0x7428
# define HDMI0_GC_AVMUTE (1 << 0)
#define HDMI0_AVI_INFO0 0x7454
1126,22 → 1061,16
#define HDMI0_GENERIC1_6 0x74a8
#define HDMI0_ACR_32_0 0x74ac
# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
# define HDMI0_ACR_CTS_32_MASK (0xfffff << 12)
#define HDMI0_ACR_32_1 0x74b0
# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
# define HDMI0_ACR_N_32_MASK (0xfffff << 0)
#define HDMI0_ACR_44_0 0x74b4
# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
# define HDMI0_ACR_CTS_44_MASK (0xfffff << 12)
#define HDMI0_ACR_44_1 0x74b8
# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
# define HDMI0_ACR_N_44_MASK (0xfffff << 0)
#define HDMI0_ACR_48_0 0x74bc
# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
# define HDMI0_ACR_CTS_48_MASK (0xfffff << 12)
#define HDMI0_ACR_48_1 0x74c0
# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
# define HDMI0_ACR_N_48_MASK (0xfffff << 0)
#define HDMI0_ACR_STATUS_0 0x74c4
#define HDMI0_ACR_STATUS_1 0x74c8
#define HDMI0_AUDIO_INFO0 0x74cc
1161,10 → 1090,8
# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
# define HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK (0xf << 20)
# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
# define HDMI0_60958_CS_CLOCK_ACCURACY_MASK (3 << 28)
#define HDMI0_60958_1 0x74d8
# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
1171,7 → 1098,6
# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
# define HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK (0xf << 20)
#define HDMI0_ACR_PACKET_CONTROL 0x74dc
# define HDMI0_ACR_SEND (1 << 0)
# define HDMI0_ACR_CONT (1 << 1)
1182,7 → 1108,6
# define HDMI0_ACR_48 3
# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI0_ACR_AUTO_SEND (1 << 12)
#define DCE3_HDMI0_AUDIO_CRC_CONTROL 0x74dc
#define HDMI0_RAMP_CONTROL0 0x74e0
# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL1 0x74e4
1223,247 → 1148,6
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
 
/* DCE3 FMT blocks */
#define FMT_CONTROL 0x6700
# define FMT_PIXEL_ENCODING (1 << 16)
/* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
#define FMT_BIT_DEPTH_CONTROL 0x6710
# define FMT_TRUNCATE_EN (1 << 0)
# define FMT_TRUNCATE_DEPTH (1 << 4)
# define FMT_SPATIAL_DITHER_EN (1 << 8)
# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
# define FMT_RGB_RANDOM_ENABLE (1 << 14)
# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
# define FMT_TEMPORAL_DITHER_EN (1 << 16)
# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
# define FMT_TEMPORAL_LEVEL (1 << 24)
# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
# define FMT_25FRC_SEL(x) ((x) << 26)
# define FMT_50FRC_SEL(x) ((x) << 28)
# define FMT_75FRC_SEL(x) ((x) << 30)
#define FMT_CLAMP_CONTROL 0x672c
# define FMT_CLAMP_DATA_EN (1 << 0)
# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
# define FMT_CLAMP_6BPC 0
# define FMT_CLAMP_8BPC 1
# define FMT_CLAMP_10BPC 2
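/* Illustrative sketch (not part of this diff): driving a 6 bpc panel with
 * the FMT block above would look roughly like
 *
 *   WREG32(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN);
 *   WREG32(FMT_CLAMP_CONTROL,
 *          FMT_CLAMP_DATA_EN | FMT_CLAMP_COLOR_FORMAT(FMT_CLAMP_6BPC));
 */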
 
/* Power management */
#define CG_SPLL_FUNC_CNTL 0x600
# define SPLL_RESET (1 << 0)
# define SPLL_SLEEP (1 << 1)
# define SPLL_REF_DIV(x) ((x) << 2)
# define SPLL_REF_DIV_MASK (7 << 2)
# define SPLL_FB_DIV(x) ((x) << 5)
# define SPLL_FB_DIV_MASK (0xff << 5)
# define SPLL_PULSEEN (1 << 13)
# define SPLL_PULSENUM(x) ((x) << 14)
# define SPLL_PULSENUM_MASK (3 << 14)
# define SPLL_SW_HILEN(x) ((x) << 16)
# define SPLL_SW_HILEN_MASK (0xf << 16)
# define SPLL_SW_LOLEN(x) ((x) << 20)
# define SPLL_SW_LOLEN_MASK (0xf << 20)
# define SPLL_DIVEN (1 << 24)
# define SPLL_BYPASS_EN (1 << 25)
# define SPLL_CHG_STATUS (1 << 29)
# define SPLL_CTLREQ (1 << 30)
# define SPLL_CTLACK (1 << 31)
 
#define GENERAL_PWRMGT 0x618
# define GLOBAL_PWRMGT_EN (1 << 0)
# define STATIC_PM_EN (1 << 1)
# define MOBILE_SU (1 << 2)
# define THERMAL_PROTECTION_DIS (1 << 3)
# define THERMAL_PROTECTION_TYPE (1 << 4)
# define ENABLE_GEN2PCIE (1 << 5)
# define SW_GPIO_INDEX(x) ((x) << 6)
# define SW_GPIO_INDEX_MASK (3 << 6)
# define LOW_VOLT_D2_ACPI (1 << 8)
# define LOW_VOLT_D3_ACPI (1 << 9)
# define VOLT_PWRMGT_EN (1 << 10)
#define CG_TPC 0x61c
# define TPCC(x) ((x) << 0)
# define TPCC_MASK (0x7fffff << 0)
# define TPU(x) ((x) << 23)
# define TPU_MASK (0x1f << 23)
#define SCLK_PWRMGT_CNTL 0x620
# define SCLK_PWRMGT_OFF (1 << 0)
# define SCLK_TURNOFF (1 << 1)
# define SPLL_TURNOFF (1 << 2)
# define SU_SCLK_USE_BCLK (1 << 3)
# define DYNAMIC_GFX_ISLAND_PWR_DOWN (1 << 4)
# define DYNAMIC_GFX_ISLAND_PWR_LP (1 << 5)
# define CLK_TURN_ON_STAGGER (1 << 6)
# define CLK_TURN_OFF_STAGGER (1 << 7)
# define FIR_FORCE_TREND_SEL (1 << 8)
# define FIR_TREND_MODE (1 << 9)
# define DYN_GFX_CLK_OFF_EN (1 << 10)
# define VDDC3D_TURNOFF_D1 (1 << 11)
# define VDDC3D_TURNOFF_D2 (1 << 12)
# define VDDC3D_TURNOFF_D3 (1 << 13)
# define SPLL_TURNOFF_D2 (1 << 14)
# define SCLK_LOW_D1 (1 << 15)
# define DYN_GFX_CLK_OFF_MC_EN (1 << 16)
#define MCLK_PWRMGT_CNTL 0x624
# define MPLL_PWRMGT_OFF (1 << 0)
# define YCLK_TURNOFF (1 << 1)
# define MPLL_TURNOFF (1 << 2)
# define SU_MCLK_USE_BCLK (1 << 3)
# define DLL_READY (1 << 4)
# define MC_BUSY (1 << 5)
# define MC_INT_CNTL (1 << 7)
# define MRDCKA_SLEEP (1 << 8)
# define MRDCKB_SLEEP (1 << 9)
# define MRDCKC_SLEEP (1 << 10)
# define MRDCKD_SLEEP (1 << 11)
# define MRDCKE_SLEEP (1 << 12)
# define MRDCKF_SLEEP (1 << 13)
# define MRDCKG_SLEEP (1 << 14)
# define MRDCKH_SLEEP (1 << 15)
# define MRDCKA_RESET (1 << 16)
# define MRDCKB_RESET (1 << 17)
# define MRDCKC_RESET (1 << 18)
# define MRDCKD_RESET (1 << 19)
# define MRDCKE_RESET (1 << 20)
# define MRDCKF_RESET (1 << 21)
# define MRDCKG_RESET (1 << 22)
# define MRDCKH_RESET (1 << 23)
# define DLL_READY_READ (1 << 24)
# define USE_DISPLAY_GAP (1 << 25)
# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
# define USE_DISPLAY_GAP_CTXSW (1 << 27)
# define MPLL_TURNOFF_D2 (1 << 28)
# define USE_DISPLAY_URGENT_CTXSW (1 << 29)
 
#define MPLL_TIME 0x634
# define MPLL_LOCK_TIME(x) ((x) << 0)
# define MPLL_LOCK_TIME_MASK (0xffff << 0)
# define MPLL_RESET_TIME(x) ((x) << 16)
# define MPLL_RESET_TIME_MASK (0xffff << 16)
 
#define SCLK_FREQ_SETTING_STEP_0_PART1 0x648
# define STEP_0_SPLL_POST_DIV(x) ((x) << 0)
# define STEP_0_SPLL_POST_DIV_MASK (0xff << 0)
# define STEP_0_SPLL_FB_DIV(x) ((x) << 8)
# define STEP_0_SPLL_FB_DIV_MASK (0xff << 8)
# define STEP_0_SPLL_REF_DIV(x) ((x) << 16)
# define STEP_0_SPLL_REF_DIV_MASK (7 << 16)
# define STEP_0_SPLL_STEP_TIME(x) ((x) << 19)
# define STEP_0_SPLL_STEP_TIME_MASK (0x1fff << 19)
#define SCLK_FREQ_SETTING_STEP_0_PART2 0x64c
# define STEP_0_PULSE_HIGH_CNT(x) ((x) << 0)
# define STEP_0_PULSE_HIGH_CNT_MASK (0x1ff << 0)
# define STEP_0_POST_DIV_EN (1 << 9)
# define STEP_0_SPLL_STEP_ENABLE (1 << 30)
# define STEP_0_SPLL_ENTRY_VALID (1 << 31)
 
#define VID_RT 0x6f8
# define VID_CRT(x) ((x) << 0)
# define VID_CRT_MASK (0x1fff << 0)
# define VID_CRTU(x) ((x) << 13)
# define VID_CRTU_MASK (7 << 13)
# define SSTU(x) ((x) << 16)
# define SSTU_MASK (7 << 16)
#define CTXSW_PROFILE_INDEX 0x6fc
# define CTXSW_FREQ_VIDS_CFG_INDEX(x) ((x) << 0)
# define CTXSW_FREQ_VIDS_CFG_INDEX_MASK (3 << 0)
# define CTXSW_FREQ_VIDS_CFG_INDEX_SHIFT 0
# define CTXSW_FREQ_MCLK_CFG_INDEX(x) ((x) << 2)
# define CTXSW_FREQ_MCLK_CFG_INDEX_MASK (3 << 2)
# define CTXSW_FREQ_MCLK_CFG_INDEX_SHIFT 2
# define CTXSW_FREQ_SCLK_CFG_INDEX(x) ((x) << 4)
# define CTXSW_FREQ_SCLK_CFG_INDEX_MASK (0x1f << 4)
# define CTXSW_FREQ_SCLK_CFG_INDEX_SHIFT 4
# define CTXSW_FREQ_STATE_SPLL_RESET_EN (1 << 9)
# define CTXSW_FREQ_STATE_ENABLE (1 << 10)
# define CTXSW_FREQ_DISPLAY_WATERMARK (1 << 11)
# define CTXSW_FREQ_GEN2PCIE_VOLT (1 << 12)
 
#define TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
# define TARGET_PROFILE_INDEX_MASK (3 << 0)
# define TARGET_PROFILE_INDEX_SHIFT 0
# define CURRENT_PROFILE_INDEX_MASK (3 << 2)
# define CURRENT_PROFILE_INDEX_SHIFT 2
# define DYN_PWR_ENTER_INDEX(x) ((x) << 4)
# define DYN_PWR_ENTER_INDEX_MASK (3 << 4)
# define DYN_PWR_ENTER_INDEX_SHIFT 4
# define CURR_MCLK_INDEX_MASK (3 << 6)
# define CURR_MCLK_INDEX_SHIFT 6
# define CURR_SCLK_INDEX_MASK (0x1f << 8)
# define CURR_SCLK_INDEX_SHIFT 8
# define CURR_VID_INDEX_MASK (3 << 13)
# define CURR_VID_INDEX_SHIFT 13
 
#define LOWER_GPIO_ENABLE 0x710
#define UPPER_GPIO_ENABLE 0x714
#define CTXSW_VID_LOWER_GPIO_CNTL 0x718
 
#define VID_UPPER_GPIO_CNTL 0x740
#define CG_CTX_CGTT3D_R 0x744
# define PHC(x) ((x) << 0)
# define PHC_MASK (0x1ff << 0)
# define SDC(x) ((x) << 9)
# define SDC_MASK (0x3fff << 9)
#define CG_VDDC3D_OOR 0x748
# define SU(x) ((x) << 23)
# define SU_MASK (0xf << 23)
#define CG_FTV 0x74c
#define CG_FFCT_0 0x750
# define UTC_0(x) ((x) << 0)
# define UTC_0_MASK (0x3ff << 0)
# define DTC_0(x) ((x) << 10)
# define DTC_0_MASK (0x3ff << 10)
 
#define CG_BSP 0x78c
# define BSP(x) ((x) << 0)
# define BSP_MASK (0xffff << 0)
# define BSU(x) ((x) << 16)
# define BSU_MASK (0xf << 16)
#define CG_RT 0x790
# define FLS(x) ((x) << 0)
# define FLS_MASK (0xffff << 0)
# define FMS(x) ((x) << 16)
# define FMS_MASK (0xffff << 16)
#define CG_LT 0x794
# define FHS(x) ((x) << 0)
# define FHS_MASK (0xffff << 0)
#define CG_GIT 0x798
# define CG_GICST(x) ((x) << 0)
# define CG_GICST_MASK (0xffff << 0)
# define CG_GIPOT(x) ((x) << 16)
# define CG_GIPOT_MASK (0xffff << 16)
 
#define CG_SSP 0x7a8
# define CG_SST(x) ((x) << 0)
# define CG_SST_MASK (0xffff << 0)
# define CG_SSTU(x) ((x) << 16)
# define CG_SSTU_MASK (0xf << 16)
 
#define CG_RLC_REQ_AND_RSP 0x7c4
# define RLC_CG_REQ_TYPE_MASK 0xf
# define RLC_CG_REQ_TYPE_SHIFT 0
# define CG_RLC_RSP_TYPE_MASK 0xf0
# define CG_RLC_RSP_TYPE_SHIFT 4
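/* Illustrative (not part of this diff): the _MASK/_SHIFT pairs above are
 * meant for field extraction, assuming the driver's RREG32 accessor, e.g.
 *
 *   req = (RREG32(CG_RLC_REQ_AND_RSP) & RLC_CG_REQ_TYPE_MASK)
 *         >> RLC_CG_REQ_TYPE_SHIFT;
 */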
 
#define CG_FC_T 0x7cc
# define FC_T(x) ((x) << 0)
# define FC_T_MASK (0xffff << 0)
# define FC_TU(x) ((x) << 16)
# define FC_TU_MASK (0x1f << 16)
 
#define GPIOPAD_MASK 0x1798
#define GPIOPAD_A 0x179c
#define GPIOPAD_EN 0x17a0
 
#define GRBM_PWR_CNTL 0x800c
# define REQ_TYPE_MASK 0xf
# define REQ_TYPE_SHIFT 0
# define RSP_TYPE_MASK 0xf0
# define RSP_TYPE_SHIFT 4
 
/*
* UVD
*/
1575,7 → 1259,7
*/
# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
/* COMMAND */
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
/* 0 - none
* 1 - 8 in 16
* 2 - 8 in 32
1597,10 → 1281,8
*/
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_PFP_SYNC_ME 0x42 /* r7xx+ only */
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
/drivers/video/drm/radeon/radeon.h
64,14 → 64,12
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <asm/div64.h>
 
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
//#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>
#include <ttm/ttm_module.h>
 
#include <linux/irqreturn.h>
#include <pci.h>
105,15 → 103,6
extern int radeon_msi;
extern int radeon_lockup_timeout;
extern int radeon_fastfb;
extern int radeon_dpm;
extern int radeon_aspm;
extern int radeon_runtime_pm;
extern int radeon_hard_reset;
extern int radeon_vm_size;
extern int radeon_vm_block_size;
extern int radeon_deep_color;
extern int radeon_use_pflipirq;
extern int radeon_bapm;
 
 
typedef struct pm_message {
153,6 → 142,9
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
 
/* max number of rings */
#define RADEON_NUM_RINGS 6
 
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
 
172,27 → 164,11
/* R600+ */
#define R600_RING_TYPE_UVD_INDEX 5
 
/* TN+ */
#define TN_RING_TYPE_VCE1_INDEX 6
#define TN_RING_TYPE_VCE2_INDEX 7
 
/* max number of rings */
#define RADEON_NUM_RINGS 8
 
/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS 4
 
 
/* hardcode those limits for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
 
/* hard reset data */
#define RADEON_ASIC_RESET_DATA 0x39d5e86b
 
/* reset flags */
#define RADEON_RESET_GFX (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
207,54 → 183,6
#define RADEON_RESET_MC (1 << 10)
#define RADEON_RESET_DISPLAY (1 << 11)
 
/* CG block flags */
#define RADEON_CG_BLOCK_GFX (1 << 0)
#define RADEON_CG_BLOCK_MC (1 << 1)
#define RADEON_CG_BLOCK_SDMA (1 << 2)
#define RADEON_CG_BLOCK_UVD (1 << 3)
#define RADEON_CG_BLOCK_VCE (1 << 4)
#define RADEON_CG_BLOCK_HDP (1 << 5)
#define RADEON_CG_BLOCK_BIF (1 << 6)
 
/* CG flags */
#define RADEON_CG_SUPPORT_GFX_MGCG (1 << 0)
#define RADEON_CG_SUPPORT_GFX_MGLS (1 << 1)
#define RADEON_CG_SUPPORT_GFX_CGCG (1 << 2)
#define RADEON_CG_SUPPORT_GFX_CGLS (1 << 3)
#define RADEON_CG_SUPPORT_GFX_CGTS (1 << 4)
#define RADEON_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
#define RADEON_CG_SUPPORT_GFX_CP_LS (1 << 6)
#define RADEON_CG_SUPPORT_GFX_RLC_LS (1 << 7)
#define RADEON_CG_SUPPORT_MC_LS (1 << 8)
#define RADEON_CG_SUPPORT_MC_MGCG (1 << 9)
#define RADEON_CG_SUPPORT_SDMA_LS (1 << 10)
#define RADEON_CG_SUPPORT_SDMA_MGCG (1 << 11)
#define RADEON_CG_SUPPORT_BIF_LS (1 << 12)
#define RADEON_CG_SUPPORT_UVD_MGCG (1 << 13)
#define RADEON_CG_SUPPORT_VCE_MGCG (1 << 14)
#define RADEON_CG_SUPPORT_HDP_LS (1 << 15)
#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
 
/* PG flags */
#define RADEON_PG_SUPPORT_GFX_PG (1 << 0)
#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
#define RADEON_PG_SUPPORT_UVD (1 << 3)
#define RADEON_PG_SUPPORT_VCE (1 << 4)
#define RADEON_PG_SUPPORT_CP (1 << 5)
#define RADEON_PG_SUPPORT_GDS (1 << 6)
#define RADEON_PG_SUPPORT_RLC_SMU_HS (1 << 7)
#define RADEON_PG_SUPPORT_SDMA (1 << 8)
#define RADEON_PG_SUPPORT_ACP (1 << 9)
#define RADEON_PG_SUPPORT_SAMU (1 << 10)
 
/* max cursor sizes (in pixels) */
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
 
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
 
/*
* Errata workarounds.
*/
297,7 → 225,6
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
306,7 → 233,6
* Power management
*/
int radeon_pm_init(struct radeon_device *rdev);
int radeon_pm_late_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
318,63 → 244,13
u32 clock,
bool strobe_mode,
struct atom_clock_dividers *dividers);
int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
u32 clock,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask);
void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
u32 eng_clock, u32 mem_clock);
int radeon_atom_get_voltage_step(struct radeon_device *rdev,
u8 voltage_type, u16 *voltage_step);
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
u16 voltage_id, u16 *voltage);
int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
u16 *voltage,
u16 leakage_idx);
int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
u16 *leakage_id);
int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
u16 *vddc, u16 *vddci,
u16 virtual_voltage_id,
u16 vbios_voltage_id);
int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
u16 virtual_voltage_id,
u16 *voltage);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,
u16 *true_voltage);
int radeon_atom_get_min_voltage(struct radeon_device *rdev,
u8 voltage_type, u16 *min_voltage);
int radeon_atom_get_max_voltage(struct radeon_device *rdev,
u8 voltage_type, u16 *max_voltage);
int radeon_atom_get_voltage_table(struct radeon_device *rdev,
u8 voltage_type, u8 voltage_mode,
struct atom_voltage_table *voltage_table);
bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
u8 voltage_type, u8 voltage_mode);
int radeon_atom_get_svi2_info(struct radeon_device *rdev,
u8 voltage_type,
u8 *svd_gpio_id, u8 *svc_gpio_id);
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
u32 mem_clock);
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
u32 mem_clock);
int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
u8 module_index,
struct atom_mc_reg_table *reg_table);
int radeon_atom_get_memory_info(struct radeon_device *rdev,
u8 module_index, struct atom_memory_info *mem_info);
int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
bool gddr5, u8 module_index,
struct atom_memory_clock_range_table *mclk_range_table);
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
u16 voltage_id, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
extern int si_get_temp(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split);
389,6 → 265,7
/* sync_seq is protected by ring emission lock */
uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
unsigned long last_activity;
bool initialized;
};
 
409,8 → 286,8
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
473,11 → 350,6
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
 
#if defined(CONFIG_DEBUG_FS)
struct dentry *vram;
struct dentry *gtt;
#endif
};
 
/* bo virtual address in a specific vm */
484,13 → 356,14
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
uint64_t soffset;
uint64_t eoffset;
uint32_t flags;
uint64_t addr;
bool valid;
unsigned ref_count;
 
/* protected by vm mutex */
struct interval_tree_node it;
struct list_head vm_status;
struct list_head vm_list;
 
/* constant after initialization */
struct radeon_vm *vm;
501,14 → 374,15
/* Protected by gem.mutex */
struct list_head list;
/* Protected by tbo.reserved */
u32 initial_domain;
u32 placements[3];
u32 domain;
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
u32 flags;
unsigned pin_count;
void *kptr;
void *uptr;
u32 cpu_addr;
u32 tiling_flags;
u32 pitch;
int surface_reg;
520,10 → 394,18
struct radeon_device *rdev;
struct drm_gem_object gem_base;
 
pid_t pid;
struct ttm_bo_kmap_obj dma_buf_vmap;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
struct radeon_bo_list {
struct radeon_bo *bo;
uint64_t gpu_offset;
unsigned rdomain;
unsigned wdomain;
u32 tiling_flags;
};
 
int radeon_gem_debugfs_init(struct radeon_device *rdev);
 
/* sub-allocation manager; it has to be protected by another lock.
559,7 → 441,6
uint64_t gpu_addr;
void *cpu_ptr;
uint32_t domain;
uint32_t align;
};
 
struct radeon_sa_bo;
584,9 → 465,9
 
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int radeon_gem_object_create(struct radeon_device *rdev, int size,
int alignment, int initial_domain,
u32 flags, bool kernel,
bool discardable, bool kernel,
struct drm_gem_object **obj);
 
int radeon_mode_dumb_create(struct drm_file *file_priv,
595,28 → 476,29
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle);
 
/*
* Semaphores.
*/
/* everything here is constant */
struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
};
 
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
struct radeon_fence *fence);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int waiting_ring);
int signaler, int waiter);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
631,12 → 513,6
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
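/* Worked example: with a 4KB GPU page (RADEON_GPU_PAGE_MASK = 0xfff),
 * RADEON_GPU_PAGE_ALIGN(0x1001) = (0x1001 + 0xfff) & ~0xfff = 0x2000. */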
 
#define RADEON_GART_PAGE_DUMMY 0
#define RADEON_GART_PAGE_VALID (1 << 0)
#define RADEON_GART_PAGE_READ (1 << 1)
#define RADEON_GART_PAGE_WRITE (1 << 2)
#define RADEON_GART_PAGE_SNOOP (1 << 3)
 
struct radeon_gart {
dma_addr_t table_addr;
struct radeon_bo *robj;
660,8 → 536,9
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist,
dma_addr_t *dma_addr, uint32_t flags);
int pages, u32 *pagelist,
dma_addr_t *dma_addr);
void radeon_gart_restore(struct radeon_device *rdev);
 
 
/*
705,23 → 582,7
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
 
/*
* GPU doorbell structures, functions & helpers
*/
#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
 
struct radeon_doorbell {
/* doorbell mmio */
resource_size_t base;
resource_size_t size;
u32 __iomem *ptr;
u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
};
 
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
 
/*
* IRQS.
*/
761,29 → 622,16
u32 afmt_status6;
};
 
struct cik_irq_stat_regs {
u32 disp_int;
u32 disp_int_cont;
u32 disp_int_cont2;
u32 disp_int_cont3;
u32 disp_int_cont4;
u32 disp_int_cont5;
u32 disp_int_cont6;
u32 d1grph_int;
u32 d2grph_int;
u32 d3grph_int;
u32 d4grph_int;
u32 d5grph_int;
u32 d6grph_int;
};
 
union radeon_irq_stat_regs {
struct r500_irq_stat_regs r500;
struct r600_irq_stat_regs r600;
struct evergreen_irq_stat_regs evergreen;
struct cik_irq_stat_regs cik;
};
 
#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
#define RADEON_MAX_AFMT_BLOCKS 6
 
struct radeon_irq {
bool installed;
spinlock_t lock;
794,7 → 642,6
bool hpd[RADEON_MAX_HPD_PINS];
bool afmt[RADEON_MAX_AFMT_BLOCKS];
union radeon_irq_stat_regs stat_regs;
bool dpm_thermal;
};
 
int radeon_irq_kms_init(struct radeon_device *rdev);
821,6 → 668,7
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
 
827,42 → 675,32
struct radeon_ring {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned rptr_offs;
unsigned rptr_reg;
unsigned rptr_save_reg;
u64 next_rptr_gpu_addr;
volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
atomic_t last_rptr;
atomic64_t last_activity;
unsigned long last_activity;
unsigned last_rptr;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
bool ready;
u32 ptr_reg_shift;
u32 ptr_reg_mask;
u32 nop;
u32 idx;
u64 last_semaphore_signal_addr;
u64 last_semaphore_wait_addr;
/* for CIK queues */
u32 me;
u32 pipe;
u32 queue;
struct radeon_bo *mqd_obj;
u32 doorbell_index;
unsigned wptr_offs;
};
 
struct radeon_mec {
struct radeon_bo *hpd_eop_obj;
u64 hpd_eop_gpu_addr;
u32 num_pipe;
u32 num_mec;
u32 num_queue;
};
 
/*
* VM
*/
870,65 → 708,38
/* maximum number of VMIDs */
#define RADEON_NUM_VM 16
 
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, 9 bits in the page
* table and the remaining 19 bits are in the page directory */
#define RADEON_VM_BLOCK_SIZE 9
 
/* number of entries in page table */
#define RADEON_VM_PTE_COUNT (1 << radeon_vm_block_size)
#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
 
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define RADEON_VM_PTB_ALIGN_SIZE 32768
#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
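/* Illustrative address split for the 9-bit block size described above
 * (12-bit page offset, 9-bit page table index, directory index above that):
 *
 *   pte_idx = (va >> 12) & (RADEON_VM_PTE_COUNT - 1);
 *   pde_idx =  va >> (12 + 9);
 */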
 
#define R600_PTE_VALID (1 << 0)
#define R600_PTE_SYSTEM (1 << 1)
#define R600_PTE_SNOOPED (1 << 2)
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
 
/* PTE (Page Table Entry) fragment field for different page sizes */
#define R600_PTE_FRAG_4KB (0 << 7)
#define R600_PTE_FRAG_64KB (4 << 7)
#define R600_PTE_FRAG_256KB (6 << 7)
 
/* flags needed to be set so we can copy directly from the GART table */
#define R600_PTE_GART_MASK ( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
R600_PTE_SYSTEM | R600_PTE_VALID )
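/* Illustrative sketch (not part of this diff): a snooped system page
 * mapped read/write would carry
 *
 *   flags = R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
 *           R600_PTE_READABLE | R600_PTE_WRITEABLE;
 *
 * i.e. R600_PTE_GART_MASK | R600_PTE_SNOOPED.
 */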
 
struct radeon_vm_pt {
struct radeon_bo *bo;
uint64_t addr;
};
 
struct radeon_vm {
struct rb_root va;
struct list_head list;
struct list_head va;
unsigned id;
 
/* BOs moved, but not yet updated in the PT */
struct list_head invalidated;
 
/* BOs freed, but not yet updated in the PT */
struct list_head freed;
 
/* contains the page directory */
struct radeon_bo *page_directory;
struct radeon_sa_bo *page_directory;
uint64_t pd_gpu_addr;
unsigned max_pde_used;
 
/* array of page tables, one for each page directory entry */
struct radeon_vm_pt *page_tables;
struct radeon_sa_bo **page_tables;
 
struct radeon_bo_va *ib_bo_va;
 
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
/* last flush or NULL if we still need to flush */
struct radeon_fence *last_flush;
/* last use of vmid */
struct radeon_fence *last_id_use;
};
 
struct radeon_vm_manager {
struct mutex lock;
struct list_head lru_vm;
struct radeon_fence *active[RADEON_NUM_VM];
struct radeon_sa_manager sa_manager;
uint32_t max_pfn;
/* number of VMIDs */
unsigned nvm;
936,8 → 747,6
u64 vram_base_offset;
/* is vm enabled? */
bool enabled;
/* for hw to save the PD addr on suspend/resume */
uint32_t saved_table_addr[RADEON_NUM_VM];
};
 
/*
961,29 → 770,45
bool enabled;
};
 
struct r600_blit_cp_primitives {
void (*set_render_target)(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr);
void (*cp_set_surface_sync)(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr);
void (*set_shaders)(struct radeon_device *rdev);
void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
void (*set_tex_resource)(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size);
void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
int x2, int y2);
void (*draw_auto)(struct radeon_device *rdev);
void (*set_default_state)(struct radeon_device *rdev);
};
 
struct r600_blit {
struct radeon_bo *shader_obj;
struct r600_blit_cp_primitives primitives;
int max_dim;
int ring_size_common;
int ring_size_per_loop;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
u32 state_offset;
u32 state_len;
};
 
/*
* RLC stuff
* SI RLC stuff
*/
#include "clearstate_defs.h"
 
struct radeon_rlc {
struct si_rlc {
/* for power gating */
struct radeon_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
volatile uint32_t *sr_ptr;
const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct radeon_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
volatile uint32_t *cs_ptr;
const struct cs_section_def *cs_data;
u32 clear_state_size;
/* for cp tables */
struct radeon_bo *cp_table_obj;
uint64_t cp_table_gpu_addr;
volatile uint32_t *cp_table_ptr;
u32 cp_table_size;
};
 
int radeon_ib_get(struct radeon_device *rdev, int ring,
990,8 → 815,9
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib, bool hdp_flush);
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
1001,15 → 827,13
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
bool hdp_flush);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
bool hdp_flush);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_lockup_update(struct radeon_device *rdev,
struct radeon_ring *ring);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data);
1016,7 → 840,8
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, u32 nop);
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
1033,21 → 858,22
* CS.
*/
struct radeon_cs_reloc {
struct drm_gem_object *gobj;
// struct drm_gem_object *gobj;
struct radeon_bo *robj;
struct ttm_validate_buffer tv;
uint64_t gpu_offset;
unsigned prefered_domains;
unsigned allowed_domains;
uint32_t tiling_flags;
struct radeon_bo_list lobj;
uint32_t handle;
uint32_t flags;
};
 
struct radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
int kpage_idx[2];
uint32_t *kpage[2];
uint32_t *kdata;
void __user *user_ptr;
int last_copied_page;
int last_page_index;
};
 
struct radeon_cs_parser {
1064,7 → 890,6
unsigned nrelocs;
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct radeon_cs_reloc *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
1080,19 → 905,11
u32 cs_flags;
u32 ring;
s32 priority;
struct ww_acquire_ctx ticket;
};
 
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
 
if (ibc->kdata)
return ibc->kdata[idx];
return p->ib.ptr[idx];
}
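/* Illustrative use (sketch, not part of this diff), e.g. inside a
 * cs_parse callback:
 *
 *   u32 header = radeon_get_ib_value(p, pkt->idx);
 *
 * Values come from the kernel copy when one exists (kdata), otherwise
 * straight from the indirect buffer.
 */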
 
 
struct radeon_cs_packet {
unsigned idx;
unsigned type;
1137,9 → 954,8
#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
#define R600_WB_UVD_RPTR_OFFSET 2560
#define R600_WB_EVENT_OFFSET 3072
#define CIK_WB_CP1_WPTR_OFFSET 3328
#define CIK_WB_CP2_WPTR_OFFSET 3584
 
/**
* struct radeon_pm - power management data
1164,7 → 980,6
enum radeon_pm_method {
PM_METHOD_PROFILE,
PM_METHOD_DYNPM,
PM_METHOD_DPM,
};
 
enum radeon_dynpm_state {
1190,24 → 1005,11
};
 
enum radeon_pm_state_type {
/* not used for dpm */
POWER_STATE_TYPE_DEFAULT,
POWER_STATE_TYPE_POWERSAVE,
/* user selectable states */
POWER_STATE_TYPE_BATTERY,
POWER_STATE_TYPE_BALANCED,
POWER_STATE_TYPE_PERFORMANCE,
/* internal states */
POWER_STATE_TYPE_INTERNAL_UVD,
POWER_STATE_TYPE_INTERNAL_UVD_SD,
POWER_STATE_TYPE_INTERNAL_UVD_HD,
POWER_STATE_TYPE_INTERNAL_UVD_HD2,
POWER_STATE_TYPE_INTERNAL_UVD_MVC,
POWER_STATE_TYPE_INTERNAL_BOOT,
POWER_STATE_TYPE_INTERNAL_THERMAL,
POWER_STATE_TYPE_INTERNAL_ACPI,
POWER_STATE_TYPE_INTERNAL_ULV,
POWER_STATE_TYPE_INTERNAL_3DPERF,
};
 
enum radeon_pm_profile_type {
1236,18 → 1038,12
 
enum radeon_int_thermal_type {
THERMAL_TYPE_NONE,
THERMAL_TYPE_EXTERNAL,
THERMAL_TYPE_EXTERNAL_GPIO,
THERMAL_TYPE_RV6XX,
THERMAL_TYPE_RV770,
THERMAL_TYPE_ADT7473_WITH_INTERNAL,
THERMAL_TYPE_EVERGREEN,
THERMAL_TYPE_SUMO,
THERMAL_TYPE_NI,
THERMAL_TYPE_SI,
THERMAL_TYPE_EMC2103_WITH_INTERNAL,
THERMAL_TYPE_CI,
THERMAL_TYPE_KV,
};
 
struct radeon_voltage {
1301,280 → 1097,6
*/
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
 
enum radeon_dpm_auto_throttle_src {
RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};
 
enum radeon_dpm_event_src {
RADEON_DPM_EVENT_SRC_ANALOG = 0,
RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
RADEON_DPM_EVENT_SRC_DIGITAL = 2,
RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};
 
#define RADEON_MAX_VCE_LEVELS 6
 
enum radeon_vce_level {
RADEON_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
RADEON_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
RADEON_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
RADEON_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
};
 
struct radeon_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
u32 class2; /* vbios flags */
/* UVD clocks */
u32 vclk;
u32 dclk;
/* VCE clocks */
u32 evclk;
u32 ecclk;
bool vce_active;
enum radeon_vce_level vce_level;
/* asic priv */
void *ps_priv;
};
 
struct radeon_dpm_thermal {
/* thermal interrupt work */
struct work_struct work;
/* low temperature threshold */
int min_temp;
/* high temperature threshold */
int max_temp;
/* was interrupt low to high or high to low */
bool high_to_low;
};
 
enum radeon_clk_action
{
RADEON_SCLK_UP = 1,
RADEON_SCLK_DOWN
};
 
struct radeon_blacklist_clocks
{
u32 sclk;
u32 mclk;
enum radeon_clk_action action;
};
 
struct radeon_clock_and_voltage_limits {
u32 sclk;
u32 mclk;
u16 vddc;
u16 vddci;
};
 
struct radeon_clock_array {
u32 count;
u32 *values;
};
 
struct radeon_clock_voltage_dependency_entry {
u32 clk;
u16 v;
};
 
struct radeon_clock_voltage_dependency_table {
u32 count;
struct radeon_clock_voltage_dependency_entry *entries;
};
 
union radeon_cac_leakage_entry {
struct {
u16 vddc;
u32 leakage;
};
struct {
u16 vddc1;
u16 vddc2;
u16 vddc3;
};
};
 
struct radeon_cac_leakage_table {
u32 count;
union radeon_cac_leakage_entry *entries;
};
 
struct radeon_phase_shedding_limits_entry {
u16 voltage;
u32 sclk;
u32 mclk;
};
 
struct radeon_phase_shedding_limits_table {
u32 count;
struct radeon_phase_shedding_limits_entry *entries;
};
 
struct radeon_uvd_clock_voltage_dependency_entry {
u32 vclk;
u32 dclk;
u16 v;
};
 
struct radeon_uvd_clock_voltage_dependency_table {
u8 count;
struct radeon_uvd_clock_voltage_dependency_entry *entries;
};
 
struct radeon_vce_clock_voltage_dependency_entry {
u32 ecclk;
u32 evclk;
u16 v;
};
 
struct radeon_vce_clock_voltage_dependency_table {
u8 count;
struct radeon_vce_clock_voltage_dependency_entry *entries;
};
 
struct radeon_ppm_table {
u8 ppm_design;
u16 cpu_core_number;
u32 platform_tdp;
u32 small_ac_platform_tdp;
u32 platform_tdc;
u32 small_ac_platform_tdc;
u32 apu_tdp;
u32 dgpu_tdp;
u32 dgpu_ulv_power;
u32 tj_max;
};
 
struct radeon_cac_tdp_table {
u16 tdp;
u16 configurable_tdp;
u16 tdc;
u16 battery_power_limit;
u16 small_power_limit;
u16 low_cac_leakage;
u16 high_cac_leakage;
u16 maximum_power_delivery_limit;
};
 
struct radeon_dpm_dynamic_state {
struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
struct radeon_clock_array valid_sclk_values;
struct radeon_clock_array valid_mclk_values;
struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
u32 mclk_sclk_ratio;
u32 sclk_mclk_delta;
u16 vddc_vddci_delta;
u16 min_vddc_for_pcie_gen2;
struct radeon_cac_leakage_table cac_leakage_table;
struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
struct radeon_ppm_table *ppm_table;
struct radeon_cac_tdp_table *cac_tdp_table;
};
 
struct radeon_dpm_fan {
u16 t_min;
u16 t_med;
u16 t_high;
u16 pwm_min;
u16 pwm_med;
u16 pwm_high;
u8 t_hyst;
u32 cycle_delay;
u16 t_max;
bool ucode_fan_control;
};
 
enum radeon_pcie_gen {
RADEON_PCIE_GEN1 = 0,
RADEON_PCIE_GEN2 = 1,
RADEON_PCIE_GEN3 = 2,
RADEON_PCIE_GEN_INVALID = 0xffff
};
 
enum radeon_dpm_forced_level {
RADEON_DPM_FORCED_LEVEL_AUTO = 0,
RADEON_DPM_FORCED_LEVEL_LOW = 1,
RADEON_DPM_FORCED_LEVEL_HIGH = 2,
};
 
struct radeon_vce_state {
/* vce clocks */
u32 evclk;
u32 ecclk;
/* gpu clocks */
u32 sclk;
u32 mclk;
u8 clk_idx;
u8 pstate;
};
 
struct radeon_dpm {
struct radeon_ps *ps;
/* number of valid power states */
int num_ps;
/* current power state that is active */
struct radeon_ps *current_ps;
/* requested power state */
struct radeon_ps *requested_ps;
/* boot up power state */
struct radeon_ps *boot_ps;
/* default uvd power state */
struct radeon_ps *uvd_ps;
/* vce requirements */
struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS];
enum radeon_vce_level vce_level;
enum radeon_pm_state_type state;
enum radeon_pm_state_type user_state;
u32 platform_caps;
u32 voltage_response_time;
u32 backbias_response_time;
void *priv;
u32 new_active_crtcs;
int new_active_crtc_count;
u32 current_active_crtcs;
int current_active_crtc_count;
struct radeon_dpm_dynamic_state dyn_state;
struct radeon_dpm_fan fan;
u32 tdp_limit;
u32 near_tdp_limit;
u32 near_tdp_limit_adjusted;
u32 sq_ramping_threshold;
u32 cac_leakage;
u16 tdp_od_limit;
u32 tdp_adjustment;
u16 load_line_slope;
bool power_control;
bool ac_power;
/* special states active */
bool thermal_active;
bool uvd_active;
bool vce_active;
/* thermal handling */
struct radeon_dpm_thermal thermal;
/* forced levels */
enum radeon_dpm_forced_level forced_level;
/* track UVD streams */
unsigned sd;
unsigned hd;
};
 
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable);
 
struct radeon_pm {
struct mutex mutex;
/* write locked while reprogramming mclk */
1615,7 → 1137,7
/* selected pm method */
enum radeon_pm_method pm_method;
/* dynpm power management */
struct delayed_work dynpm_idle_work;
// struct delayed_work dynpm_idle_work;
enum radeon_dynpm_state dynpm_state;
enum radeon_dynpm_action dynpm_planned_action;
unsigned long dynpm_action_timeout;
1628,9 → 1150,6
/* internal thermal controller on rv6xx+ */
enum radeon_int_thermal_type int_thermal_type;
struct device *int_hwmon_dev;
/* dpm */
bool dpm_enabled;
struct radeon_dpm dpm;
};
 
int radeon_pm_get_type_index(struct radeon_device *rdev,
1647,10 → 1166,8
struct radeon_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
unsigned img_size[RADEON_MAX_UVD_HANDLES];
struct delayed_work idle_work;
};
 
1679,123 → 1196,14
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
unsigned cg_upll_func_cntl);
 
/*
* VCE
*/
#define RADEON_MAX_VCE_HANDLES 16
#define RADEON_VCE_STACK_SIZE (1024*1024)
#define RADEON_VCE_HEAP_SIZE (4*1024*1024)
 
struct radeon_vce {
struct radeon_bo *vcpu_bo;
uint64_t gpu_addr;
unsigned fw_version;
unsigned fb_version;
atomic_t handles[RADEON_MAX_VCE_HANDLES];
struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
unsigned img_size[RADEON_MAX_VCE_HANDLES];
struct delayed_work idle_work;
};
 
int radeon_vce_init(struct radeon_device *rdev);
void radeon_vce_fini(struct radeon_device *rdev);
int radeon_vce_suspend(struct radeon_device *rdev);
int radeon_vce_resume(struct radeon_device *rdev);
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence);
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence);
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
void radeon_vce_note_usage(struct radeon_device *rdev);
int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
int radeon_vce_cs_parse(struct radeon_cs_parser *p);
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
void radeon_vce_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 
struct r600_audio_pin {
struct r600_audio {
int channels;
int rate;
int bits_per_sample;
u8 status_bits;
u8 category_code;
u32 offset;
bool connected;
u32 id;
};
 
struct r600_audio {
bool enabled;
struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
int num_pins;
};
 
/*
* Benchmarking
*/
void radeon_benchmark(struct radeon_device *rdev, int test_number);
 
 
/*
* Testing
*/
void radeon_test_moves(struct radeon_device *rdev);
void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *cpA,
struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);
 
 
/*
* Debugfs
*/
struct radeon_debugfs {
struct drm_info_list *files;
unsigned num_files;
};
 
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
 
/*
* ASIC ring specific functions.
*/
struct radeon_asic_ring {
/* ring read/write ptr handling */
u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
 
/* validating and patching of IBs */
int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
int (*cs_parse)(struct radeon_cs_parser *p);
 
/* command emit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
/* testing functions */
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
 
/* deprecated */
void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
};
 
/*
* ASIC specific functions.
*/
struct radeon_asic {
1805,8 → 1213,13
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
int (*asic_reset)(struct radeon_device *rdev);
/* Flush the HDP cache via MMIO */
void (*mmio_hdp_flush)(struct radeon_device *rdev);
/* ioctl hw specific callback. Some hw might want to perform a special
* operation on a specific ioctl. For instance on wait idle some hw
* might want to perform an HDP flush through MMIO as it seems that
* some R6XX/R7XX hw doesn't take HDP flush into account if programmed
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
/* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
1818,30 → 1231,33
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
void (*set_page)(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
void (*copy_pages)(struct radeon_device *rdev,
 
u32 pt_ring_index;
void (*set_page)(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
void (*write_pages)(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void (*set_pages)(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void (*pad_ib)(struct radeon_ib *ib);
} vm;
/* ring specific callbacks */
struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
struct {
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
int (*cs_parse)(struct radeon_cs_parser *p);
void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
} ring[RADEON_NUM_RINGS];
/* irqs */
struct {
int (*set)(struct radeon_device *rdev);
1900,7 → 1316,7
bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
} hpd;
/* static power management */
/* power management */
struct {
void (*misc)(struct radeon_device *rdev);
void (*prepare)(struct radeon_device *rdev);
1915,34 → 1331,12
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk);
int (*get_temperature)(struct radeon_device *rdev);
} pm;
/* dynamic power management */
struct {
int (*init)(struct radeon_device *rdev);
void (*setup_asic)(struct radeon_device *rdev);
int (*enable)(struct radeon_device *rdev);
int (*late_enable)(struct radeon_device *rdev);
void (*disable)(struct radeon_device *rdev);
int (*pre_set_power_state)(struct radeon_device *rdev);
int (*set_power_state)(struct radeon_device *rdev);
void (*post_set_power_state)(struct radeon_device *rdev);
void (*display_configuration_changed)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
u32 (*get_sclk)(struct radeon_device *rdev, bool low);
u32 (*get_mclk)(struct radeon_device *rdev, bool low);
void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
bool (*vblank_too_short)(struct radeon_device *rdev);
void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
void (*enable_bapm)(struct radeon_device *rdev, bool enable);
} dpm;
/* pageflipping */
struct {
void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
void (*post_page_flip)(struct radeon_device *rdev, int crtc);
} pflip;
};
 
1981,7 → 1375,6
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
unsigned active_simds;
};
 
struct rv770_asic {
2007,7 → 1400,6
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
unsigned active_simds;
};
 
struct evergreen_asic {
2034,7 → 1426,6
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
unsigned active_simds;
};
 
struct cayman_asic {
2073,7 → 1464,6
unsigned multi_gpu_tile_size;
 
unsigned tile_config;
unsigned active_simds;
};
 
struct si_asic {
2092,7 → 1482,7
unsigned sc_earlyz_tile_fifo_size;
 
unsigned num_tile_pipes;
unsigned backend_enable_mask;
unsigned num_backends_per_se;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
2104,41 → 1494,8
 
unsigned tile_config;
uint32_t tile_mode_array[32];
uint32_t active_cus;
};
 
struct cik_asic {
unsigned max_shader_engines;
unsigned max_tile_pipes;
unsigned max_cu_per_sh;
unsigned max_sh_per_se;
unsigned max_backends_per_se;
unsigned max_texture_channel_caches;
unsigned max_gprs;
unsigned max_gs_threads;
unsigned max_hw_contexts;
unsigned sc_prim_fifo_size_frontend;
unsigned sc_prim_fifo_size_backend;
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_size;
 
unsigned num_tile_pipes;
unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
unsigned mem_max_burst_length_bytes;
unsigned mem_row_size_in_kb;
unsigned shader_engine_tile_size;
unsigned num_gpus;
unsigned multi_gpu_tile_size;
 
unsigned tile_config;
uint32_t tile_mode_array[32];
uint32_t macrotile_mode_array[16];
uint32_t active_cus;
};
 
union radeon_asic_config {
struct r300_asic r300;
struct r100_asic r100;
2147,7 → 1504,6
struct evergreen_asic evergreen;
struct cayman_asic cayman;
struct si_asic si;
struct cik_asic cik;
};
 
/*
2165,57 → 1521,7
u64 gpu_addr;
};
 
/*
* ACPI
*/
struct radeon_atif_notification_cfg {
bool enabled;
int command_code;
};
 
struct radeon_atif_notifications {
bool display_switch;
bool expansion_mode_change;
bool thermal_state;
bool forced_power_state;
bool system_power_state;
bool display_conf_change;
bool px_gfx_switch;
bool brightness_change;
bool dgpu_display_event;
};
 
struct radeon_atif_functions {
bool system_params;
bool sbios_requests;
bool select_active_disp;
bool lid_state;
bool get_tv_standard;
bool set_tv_standard;
bool get_panel_expansion_mode;
bool set_panel_expansion_mode;
bool temperature_change;
bool graphics_device_types;
};
 
struct radeon_atif {
struct radeon_atif_notifications notifications;
struct radeon_atif_functions functions;
struct radeon_atif_notification_cfg notification_cfg;
struct radeon_encoder *encoder_for_bl;
};
 
struct radeon_atcs_functions {
bool get_ext_state;
bool pcie_perf_req;
bool pcie_dev_rdy;
bool pcie_bus_width;
};
 
struct radeon_atcs {
struct radeon_atcs_functions functions;
};
 
/*
* Core structure, functions and helpers.
*/
2246,28 → 1552,6
resource_size_t rmmio_size;
/* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock;
/* protects concurrent SMC based register access */
spinlock_t smc_idx_lock;
/* protects concurrent PLL register access */
spinlock_t pll_idx_lock;
/* protects concurrent MC register access */
spinlock_t mc_idx_lock;
/* protects concurrent PCIE register access */
spinlock_t pcie_idx_lock;
/* protects concurrent PCIE_PORT register access */
spinlock_t pciep_idx_lock;
/* protects concurrent PIF register access */
spinlock_t pif_idx_lock;
/* protects concurrent CG register access */
spinlock_t cg_idx_lock;
/* protects concurrent UVD register access */
spinlock_t uvd_idx_lock;
/* protects concurrent RCU register access */
spinlock_t rcu_idx_lock;
/* protects concurrent DIDT register access */
spinlock_t didt_idx_lock;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t end_idx_lock;
void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
2284,7 → 1568,6
struct radeon_gart gart;
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_doorbell doorbell;
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
2297,7 → 1580,6
struct radeon_gem gem;
struct radeon_pm pm;
struct radeon_uvd uvd;
struct radeon_vce vce;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
2306,7 → 1588,6
bool need_dma32;
bool accel_working;
bool fastfb_working; /* IGP feature*/
bool needs_reset;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
2313,59 → 1594,36
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
const struct firmware *mec_fw; /* CIK MEC firmware */
const struct firmware *mec2_fw; /* KV MEC2 firmware */
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
const struct firmware *uvd_fw; /* UVD firmware */
const struct firmware *vce_fw; /* VCE firmware */
bool new_fw;
struct r600_blit r600_blit;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct radeon_rlc rlc;
struct radeon_mec mec;
struct work_struct hotplug_work;
struct work_struct audio_work;
struct work_struct reset_work;
struct si_rlc rlc;
// struct work_struct hotplug_work;
// struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
bool audio_enabled;
bool has_uvd;
struct r600_audio audio; /* audio stuff */
// struct r600_audio audio_status; /* audio stuff */
// struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
struct drm_file *cmask_filp;
// struct drm_file *hyperz_filp;
// struct drm_file *cmask_filp;
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
/* debugfs */
struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
// struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
unsigned debugfs_count;
/* virtual memory */
struct radeon_vm_manager vm_manager;
struct mutex gpu_clock_mutex;
/* memory stats */
atomic64_t vram_usage;
atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
/* ACPI interface */
struct radeon_atif atif;
struct radeon_atcs atcs;
/* srbm instance registers */
struct mutex srbm_mutex;
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
 
// struct dev_pm_domain vga_pm_domain;
bool have_disp_power_ref;
u32 px_quirk_flags;
 
/* tracking pinned memory */
u64 vram_pin_size;
u64 gart_pin_size;
// struct radeon_atif atif;
// struct radeon_atcs atcs;
};
 
bool radeon_is_px(struct drm_device *dev);
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
2373,48 → 1631,13
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 
#define RADEON_MIN_MMIO_SIZE 0x10000
 
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
bool always_indirect)
{
/* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
unsigned long flags;
uint32_t ret;
 
spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
 
return ret;
}
}
 
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
bool always_indirect)
{
if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
unsigned long flags;
 
spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
}
 
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
bool always_indirect);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
bool always_indirect);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
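Editorial note: r100_mm_rreg()/r100_mm_wreg() above show the split between direct and indirect MMIO access. Registers inside the mapped window (at least RADEON_MIN_MMIO_SIZE = 64 KiB) are touched with a single readl()/writel(); anything beyond the window is funnelled through the RADEON_MM_INDEX/RADEON_MM_DATA pair under mmio_idx_lock so two threads cannot interleave the index write and the data access. A minimal standalone sketch of the same logic follows; the mock_* names, the pthread mutex, and the fake register file are assumptions of this example, not driver API.

/* Illustrative sketch only: a userspace mock of the direct-vs-indirect
 * MMIO pattern. mock_dev, MOCK_INDEX/MOCK_DATA and pthread_mutex_t are
 * stand-ins for the real readl()/writel() accessors and spinlock. */
#include <stdint.h>
#include <pthread.h>

#define MOCK_WINDOW  0x10000u   /* directly mapped MMIO window (64 KiB) */
#define MOCK_INDEX   0x0000u    /* index register inside the window     */
#define MOCK_DATA    0x0004u    /* data register inside the window      */

struct mock_dev {
    volatile uint32_t *mmio;     /* base of the mapped register aperture */
    pthread_mutex_t    idx_lock; /* serializes index/data register pairs */
};

static uint32_t mock_rreg(struct mock_dev *dev, uint32_t reg)
{
    uint32_t val;

    if (reg < MOCK_WINDOW)                  /* direct: one load suffices */
        return dev->mmio[reg / 4];

    pthread_mutex_lock(&dev->idx_lock);     /* indirect: index, then data */
    dev->mmio[MOCK_INDEX / 4] = reg;
    val = dev->mmio[MOCK_DATA / 4];
    pthread_mutex_unlock(&dev->idx_lock);
    return val;
}

int main(void)
{
    static uint32_t fake_bar[MOCK_WINDOW / 2];  /* fake register file */
    struct mock_dev dev = {
        .mmio     = fake_bar,
        .idx_lock = PTHREAD_MUTEX_INITIALIZER,
    };
    (void)mock_rreg(&dev, 0x20);      /* takes the direct path   */
    (void)mock_rreg(&dev, 0x20000);   /* takes the indirect path */
    return 0;
}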
 
/*
* Cast helper
*/
2442,20 → 1665,6
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define RREG32_SMC(reg) tn_smc_rreg(rdev, (reg))
#define WREG32_SMC(reg, v) tn_smc_wreg(rdev, (reg), (v))
#define RREG32_RCU(reg) r600_rcu_rreg(rdev, (reg))
#define WREG32_RCU(reg, v) r600_rcu_wreg(rdev, (reg), (v))
#define RREG32_CG(reg) eg_cg_rreg(rdev, (reg))
#define WREG32_CG(reg, v) eg_cg_wreg(rdev, (reg), (v))
#define RREG32_PIF_PHY0(reg) eg_pif_phy0_rreg(rdev, (reg))
#define WREG32_PIF_PHY0(reg, v) eg_pif_phy0_wreg(rdev, (reg), (v))
#define RREG32_PIF_PHY1(reg) eg_pif_phy1_rreg(rdev, (reg))
#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
2464,7 → 1673,7
WREG32(reg, tmp_); \
} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
2472,193 → 1681,27
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
 
#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
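Editorial note: WREG32_P() and WREG32_PLL_P() above are read-modify-write helpers: mask selects which bits of the current register value survive, val supplies the replacement bits, and WREG32_AND()/WREG32_OR() are the two degenerate cases. A hedged sketch of the same do { ... } while (0) idiom over an ordinary array (MOCK_RREG/MOCK_WREG are inventions of this example):

/* Sketch of the masked read-modify-write idiom; reg_file[] stands in
 * for the real RREG32()/WREG32() accessors. */
#include <stdint.h>
#include <assert.h>

static uint32_t reg_file[16];

#define MOCK_RREG(r)    (reg_file[(r)])
#define MOCK_WREG(r, v) (reg_file[(r)] = (v))

#define MOCK_WREG_P(r, val, mask)                              \
    do {                                                       \
        uint32_t tmp_ = MOCK_RREG(r);                          \
        tmp_ &= (mask);            /* keep the masked bits */  \
        tmp_ |= ((val) & ~(mask)); /* merge in the new bits */ \
        MOCK_WREG(r, tmp_);                                    \
    } while (0)

int main(void)
{
    reg_file[3] = 0xffff0000u;
    MOCK_WREG_P(3, 0x1234u, 0xffff0000u);  /* low half replaced */
    assert(reg_file[3] == 0xffff1234u);
    return 0;
}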
 
/*
* Indirect registers accessor
*/
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
unsigned long flags;
uint32_t r;
 
spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
r = RREG32(RADEON_PCIE_DATA);
spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
return r;
}
 
static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
WREG32(RADEON_PCIE_DATA, (v));
spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}
 
static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg));
r = RREG32(TN_SMC_IND_DATA_0);
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
return r;
}
 
static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(TN_SMC_IND_INDEX_0, (reg));
WREG32(TN_SMC_IND_DATA_0, (v));
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
 
static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
r = RREG32(R600_RCU_DATA);
spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
return r;
}
 
static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
WREG32(R600_RCU_DATA, (v));
spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}
 
static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
r = RREG32(EVERGREEN_CG_IND_DATA);
spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
return r;
}
 
static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->cg_idx_lock, flags);
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
WREG32(EVERGREEN_CG_IND_DATA, (v));
spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
}
 
static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY0_DATA);
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r;
}
 
static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
 
static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
r = RREG32(EVERGREEN_PIF_PHY1_DATA);
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
return r;
}
 
static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->pif_idx_lock, flags);
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
 
static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
r = RREG32(R600_UVD_CTX_DATA);
spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
return r;
}
 
static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
WREG32(R600_UVD_CTX_DATA, (v));
spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}
 
 
static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
r = RREG32(CIK_DIDT_IND_DATA);
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
return r;
}
 
static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
WREG32(CIK_DIDT_IND_DATA, (v));
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
}
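Editorial note: every accessor above follows the same four-step shape: take the block's index lock, write the (masked) register offset to the block's INDEX register, read or write the DATA register, release the lock. The masks (0x1fff for RCU, 0xffff for CG and PIF, 0x1ff for the UVD context) simply bound the index to each block's address space. As a hedged illustration, the repetition could be factored through a small descriptor; struct ind_block and mock_reg32[] are inventions of this sketch, not driver structures:

/* Sketch: factoring the repeated index/data accessors through one
 * descriptor per register block. Example-only names throughout. */
#include <stdint.h>
#include <pthread.h>

struct ind_block {
    uint32_t        index_reg; /* offset of the block's INDEX register */
    uint32_t        data_reg;  /* offset of the block's DATA register  */
    uint32_t        mask;      /* valid index bits, e.g. 0x1fff        */
    pthread_mutex_t lock;      /* per-block index/data lock            */
};

static uint32_t mock_reg32[0x100]; /* stand-in register file */

static uint32_t ind_rreg(struct ind_block *b, uint32_t reg)
{
    uint32_t r;

    pthread_mutex_lock(&b->lock);
    mock_reg32[b->index_reg / 4] = reg & b->mask; /* select register */
    r = mock_reg32[b->data_reg / 4];              /* fetch its value */
    pthread_mutex_unlock(&b->lock);
    return r;
}

int main(void)
{
    struct ind_block rcu = {
        .index_reg = 0x0, .data_reg = 0x4,
        .mask = 0x1fff,   .lock = PTHREAD_MUTEX_INITIALIZER,
    };
    return (int)(ind_rreg(&rcu, 0x2042) & 0); /* index masked to 0x42 */
}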
 
void r100_pll_errata_after_index(struct radeon_device *rdev);
 
 
2707,21 → 1750,7
(rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
(rdev->family == CHIP_MULLINS))
 
#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
(rdev->ddev->pdev->device == 0x6850) || \
(rdev->ddev->pdev->device == 0x6858) || \
(rdev->ddev->pdev->device == 0x6859) || \
(rdev->ddev->pdev->device == 0x6840) || \
(rdev->ddev->pdev->device == 0x6841) || \
(rdev->ddev->pdev->device == 0x6842) || \
(rdev->ddev->pdev->device == 0x6843))
 
/*
* BIOS helpers.
*/
2758,27 → 1787,21
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
2786,8 → 1809,8
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
2802,8 → 1825,6
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
#define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec))
#define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
2817,35 → 1838,17
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
#define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
#define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
#define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
#define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
#define radeon_dpm_post_set_power_state(rdev) rdev->asic->dpm.post_set_power_state((rdev))
#define radeon_dpm_display_configuration_changed(rdev) rdev->asic->dpm.display_configuration_changed((rdev))
#define radeon_dpm_fini(rdev) rdev->asic->dpm.fini((rdev))
#define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l))
#define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
#define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
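Editorial note: the radeon_* macros above are a thin dispatch layer: each one chases a function pointer in rdev->asic, so a single call site serves every supported chip generation. A hedged miniature of the same vtable-plus-macro arrangement (mini_asic, mini_dev and the r100 stubs are illustrative names, not the driver's real tables):

/* Sketch of the asic vtable + dispatch-macro pattern used above. */
#include <stdio.h>

struct mini_asic {
    int  (*asic_reset)(void *dev);
    void (*bandwidth_update)(void *dev);
};

struct mini_dev {
    const struct mini_asic *asic;
};

/* one call site, any chip: the macro hides the pointer chase */
#define mini_asic_reset(rdev)       ((rdev)->asic->asic_reset((rdev)))
#define mini_bandwidth_update(rdev) ((rdev)->asic->bandwidth_update((rdev)))

static int  r100_reset_stub(void *dev) { (void)dev; puts("r100 reset"); return 0; }
static void r100_bw_stub(void *dev)    { (void)dev; puts("r100 bandwidth"); }

static const struct mini_asic r100_mini = {
    .asic_reset       = r100_reset_stub,
    .bandwidth_update = r100_bw_stub,
};

int main(void)
{
    struct mini_dev dev = { .asic = &r100_mini };
    mini_asic_reset(&dev);        /* dispatches through the table */
    mini_bandwidth_update(&dev);
    return 0;
}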
 
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_pci_config_reset(struct radeon_device *rdev);
extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
2866,8 → 1869,8
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
extern void radeon_program_register_sequence(struct radeon_device *rdev,
const u32 *registers,
2878,28 → 1881,19
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
void radeon_vm_flush(struct radeon_device *rdev,
struct radeon_vm *vm,
int ring);
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_clear_invalids(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
2912,19 → 1906,11
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
void radeon_vm_bo_rmv(struct radeon_device *rdev,
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va);
 
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
void r600_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable);
void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable);
 
/*
* R600 vram scratch functions
2978,28 → 1964,11
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
extern void radeon_acpi_fini(struct radeon_device *rdev);
extern bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
u8 perf_req, bool advertise);
extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
 
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx);
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt);
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc,
int nomm);
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
uint32_t *vline_status);
 
#include "radeon_object.h"
 
#define DRM_UDELAY(d) udelay(d)
3009,5 → 1978,12
resource_size_t
drm_get_resource_len(struct drm_device *dev, unsigned int resource);
 
bool set_mode(struct drm_device *dev, struct drm_connector *connector,
videomode_t *mode, bool strict);
 
 
#ifndef __TTM__
#define radeon_ttm_set_active_vram_size(a, b)
#endif
 
#endif
/drivers/video/drm/radeon/radeon_asic.c
126,11 → 126,7
rdev->mc_rreg = &rs780_mc_rreg;
rdev->mc_wreg = &rs780_mc_wreg;
}
 
if (rdev->family >= CHIP_BONAIRE) {
rdev->pciep_rreg = &cik_pciep_rreg;
rdev->pciep_wreg = &cik_pciep_wreg;
} else if (rdev->family >= CHIP_R600) {
if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
172,22 → 168,6
/*
* ASIC
*/
 
static struct radeon_asic_ring r100_gfx_ring = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r100_cs_parse,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
.hdp_flush = &r100_ring_hdp_flush,
};
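Editorial note: the r100_gfx_ring block above shows the core of this hunk: the rev 5078 side hoists the per-asic inline ring tables into shared statics (r100_gfx_ring, r300_gfx_ring, r600_gfx_ring, ...) referenced by pointer, which is why its dispatch macros read ring[(r)]->op where rev 5077 reads ring[(r)].op. A hedged side-by-side sketch of the two layouts (mini_ring/mini_asic are example-only types):

/* Sketch: sharing one ring vtable across several asic tables by pointer. */
#include <stddef.h>

struct mini_ring {
    void (*ring_start)(void *dev);
};

static void gfx_start_stub(void *dev) { (void)dev; }

/* one definition, reused by every asic with the same gfx block */
static const struct mini_ring shared_gfx_ring = {
    .ring_start = gfx_start_stub,
};

struct mini_asic {
    const struct mini_ring *ring[1];   /* pointer: ring[i]->ring_start() */
};

static const struct mini_asic asic_a = { .ring = { &shared_gfx_ring } };
static const struct mini_asic asic_b = { .ring = { &shared_gfx_ring } };

int main(void)
{
    asic_a.ring[0]->ring_start(NULL);
    asic_b.ring[0]->ring_start(NULL);
    return 0;
}

Embedding the struct by value (ring[i].ring_start) forces each asic table to repeat the same initializers, as the rev 5077 blocks below do for every r300-class chip; storing a pointer lets all of them reuse one definition.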
 
static struct radeon_asic r100_asic = {
.init = &r100_init,
// .fini = &r100_fini,
195,7 → 175,7
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
203,7 → 183,16
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r100_cs_parse,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
213,8 → 202,8
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
.set_backlight_level = &radeon_legacy_set_backlight_level,
.get_backlight_level = &radeon_legacy_get_backlight_level,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
229,28 → 218,29
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
261,7 → 251,7
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
269,7 → 259,16
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r100_cs_parse,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
279,8 → 278,8
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
.set_backlight_level = &radeon_legacy_set_backlight_level,
.get_backlight_level = &radeon_legacy_get_backlight_level,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
295,46 → 294,32
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
static struct radeon_asic_ring r300_gfx_ring = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
.cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
.hdp_flush = &r100_ring_hdp_flush,
};
 
static struct radeon_asic r300_asic = {
.init = &r300_init,
// .fini = &r300_fini,
342,7 → 327,7
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
350,7 → 335,16
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
360,8 → 354,8
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
.set_backlight_level = &radeon_legacy_set_backlight_level,
.get_backlight_level = &radeon_legacy_get_backlight_level,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
376,28 → 370,29
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
408,7 → 403,7
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
416,7 → 411,16
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
426,8 → 430,8
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
.set_backlight_level = &radeon_legacy_set_backlight_level,
.get_backlight_level = &radeon_legacy_get_backlight_level,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
448,22 → 452,23
.set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
474,7 → 479,7
// .resume = &r420_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
482,7 → 487,16
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
492,8 → 506,8
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
508,28 → 522,29
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
540,7 → 555,7
// .resume = &rs400_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
548,7 → 563,16
.set_page = &rs400_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
558,8 → 582,8
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
.set_backlight_level = &radeon_legacy_set_backlight_level,
.get_backlight_level = &radeon_legacy_get_backlight_level,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
574,28 → 598,29
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
.misc = &r100_pm_misc,
.prepare = &r100_pm_prepare,
.finish = &r100_pm_finish,
.init_profile = &r100_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
606,7 → 631,7
// .resume = &rs600_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
614,7 → 639,16
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
624,10 → 658,8
.bandwidth_update = &rs600_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &r600_hdmi_enable,
.hdmi_setmode = &r600_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
642,28 → 674,29
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &rs600_hpd_init,
.fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
// .sense = &rs600_hpd_sense,
// .set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
.misc = &rs600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
674,7 → 707,7
// .resume = &rs690_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
682,7 → 715,16
.set_page = &rs400_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
692,10 → 734,8
.get_vblank_counter = &rs600_get_vblank_counter,
.bandwidth_update = &rs690_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &r600_hdmi_enable,
.hdmi_setmode = &r600_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
710,28 → 750,29
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &rs600_hpd_init,
.fini = &rs600_hpd_fini,
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
.misc = &rs600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
742,7 → 783,7
// .resume = &rv515_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
750,7 → 791,16
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
760,8 → 810,8
.get_vblank_counter = &rs600_get_vblank_counter,
.bandwidth_update = &rv515_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
776,28 → 826,29
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &rs600_hpd_init,
.fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
// .sense = &rs600_hpd_sense,
// .set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
.misc = &rs600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
808,7 → 859,7
// .resume = &r520_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.mmio_hdp_flush = NULL,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
816,7 → 867,16
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
826,8 → 886,8
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
842,57 → 902,32
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &rs600_hpd_init,
.fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
// .sense = &rs600_hpd_sense,
// .set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
.misc = &rs600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r420_pm_init_profile,
.get_dynpm_state = &r100_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
static struct radeon_asic_ring r600_gfx_ring = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gfx_is_lockup,
.get_rptr = &r600_gfx_get_rptr,
.get_wptr = &r600_gfx_get_wptr,
.set_wptr = &r600_gfx_set_wptr,
};
 
static struct radeon_asic_ring r600_dma_ring = {
.ib_execute = &r600_dma_ring_ib_execute,
.emit_fence = &r600_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
.cs_parse = &r600_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &r600_dma_is_lockup,
.get_rptr = &r600_dma_get_rptr,
.get_wptr = &r600_dma_get_wptr,
.set_wptr = &r600_dma_set_wptr,
};
 
static struct radeon_asic r600_asic = {
.init = &r600_init,
// .fini = &r600_fini,
900,7 → 935,7
// .resume = &r600_resume,
// .vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.mmio_hdp_flush = r600_mmio_hdp_flush,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
910,81 → 945,25
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gfx_is_lockup,
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
.emit_fence = &r600_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &r600_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &r600_dma_is_lockup,
}
},
.display = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &r600_hdmi_enable,
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
.copy = &r600_copy_cpdma,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &r600_hpd_init,
.fini = &r600_hpd_fini,
.sense = &r600_hpd_sense,
.set_polarity = &r600_hpd_set_polarity,
},
.pm = {
.misc = &r600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r600_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
},
};
 
static struct radeon_asic rv6xx_asic = {
.init = &r600_init,
// .fini = &r600_fini,
// .suspend = &r600_suspend,
// .resume = &r600_resume,
// .vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
993,18 → 972,16
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &r600_hdmi_enable,
.hdmi_setmode = &r600_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
.copy = &r600_copy_cpdma,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_dma,
.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
1011,47 → 988,29
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &r600_hpd_init,
.fini = &r600_hpd_fini,
.sense = &r600_hpd_sense,
.set_polarity = &r600_hpd_set_polarity,
// .init = &r600_hpd_init,
// .fini = &r600_hpd_fini,
// .sense = &r600_hpd_sense,
// .set_polarity = &r600_hpd_set_polarity,
},
.pm = {
.misc = &r600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r600_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
// .misc = &r600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r600_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &r600_get_pcie_lanes,
// .set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp,
.set_uvd_clocks = &r600_set_uvd_clocks,
},
.dpm = {
.init = &rv6xx_dpm_init,
.setup_asic = &rv6xx_setup_asic,
.enable = &rv6xx_dpm_enable,
.late_enable = &r600_dpm_late_enable,
.disable = &rv6xx_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &rv6xx_dpm_set_power_state,
.post_set_power_state = &r600_dpm_post_set_power_state,
.display_configuration_changed = &rv6xx_dpm_display_configuration_changed,
.fini = &rv6xx_dpm_fini,
.get_sclk = &rv6xx_dpm_get_sclk,
.get_mclk = &rv6xx_dpm_get_mclk,
.print_power_state = &rv6xx_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv6xx_dpm_force_performance_level,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
1062,7 → 1021,7
// .resume = &r600_resume,
// .vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.mmio_hdp_flush = r600_mmio_hdp_flush,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
1072,9 → 1031,25
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
.emit_fence = &r600_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &r600_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &r600_dma_is_lockup,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
1083,18 → 1058,16
.bandwidth_update = &rs690_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &r600_hdmi_enable,
.hdmi_setmode = &r600_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
.copy = &r600_copy_cpdma,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_dma,
.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
1101,63 → 1074,32
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &r600_hpd_init,
.fini = &r600_hpd_fini,
.sense = &r600_hpd_sense,
.set_polarity = &r600_hpd_set_polarity,
// .init = &r600_hpd_init,
// .fini = &r600_hpd_fini,
// .sense = &r600_hpd_sense,
// .set_polarity = &r600_hpd_set_polarity,
},
.pm = {
.misc = &r600_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &rs780_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
// .misc = &r600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &rs780_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.get_temperature = &rv6xx_get_temp,
.set_uvd_clocks = &r600_set_uvd_clocks,
},
.dpm = {
.init = &rs780_dpm_init,
.setup_asic = &rs780_dpm_setup_asic,
.enable = &rs780_dpm_enable,
.late_enable = &r600_dpm_late_enable,
.disable = &rs780_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &rs780_dpm_set_power_state,
.post_set_power_state = &r600_dpm_post_set_power_state,
.display_configuration_changed = &rs780_dpm_display_configuration_changed,
.fini = &rs780_dpm_fini,
.get_sclk = &rs780_dpm_get_sclk,
.get_mclk = &rs780_dpm_get_mclk,
.print_power_state = &rs780_dpm_print_power_state,
.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rs780_dpm_force_performance_level,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
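From this point the left-hand revision introduces shared radeon_asic_ring objects (rv770_uvd_ring just below, later evergreen_gfx_ring, cayman_gfx_ring, and so on) and the per-asic .ring arrays hold pointers to them, where the right-hand revision embedded a full copy of the callbacks in every ring slot. A sketch of that deduplication, under invented demo_* names:

/* Sketch of the ring-table refactor: shared ops objects referenced by
 * pointer instead of one embedded copy per ring slot. Invented names. */
struct demo_ring_ops {
    void (*ib_execute)(void *rdev);
    int  (*ring_test)(void *rdev);
};

static void demo_gfx_ib_execute(void *rdev) { (void)rdev; }
static int  demo_gfx_ring_test(void *rdev)  { (void)rdev; return 0; }

/* After the refactor: one shared table ... */
static struct demo_ring_ops demo_gfx_ring = {
    .ib_execute = &demo_gfx_ib_execute,
    .ring_test  = &demo_gfx_ring_test,
};

/* ... referenced from every slot that drives the same engine: */
#define DEMO_NUM_RINGS 3
struct demo_asic {
    struct demo_ring_ops *ring[DEMO_NUM_RINGS];
};

static struct demo_asic demo_chip = {
    .ring = {
        [0] = &demo_gfx_ring,   /* GFX */
        [1] = &demo_gfx_ring,   /* CP1 shares the same ops */
        [2] = &demo_gfx_ring,   /* CP2 likewise */
    },
};

Besides shrinking the source, sharing one ops object lets several slots reuse identical callbacks, which is visible below where the Cayman CP1/CP2 compute rings point at the same table as the GFX ring.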
 
static struct radeon_asic_ring rv770_uvd_ring = {
.ib_execute = &uvd_v1_0_ib_execute,
.emit_fence = &uvd_v2_2_fence_emit,
.emit_semaphore = &uvd_v1_0_semaphore_emit,
.cs_parse = &radeon_uvd_cs_parse,
.ring_test = &uvd_v1_0_ring_test,
.ib_test = &uvd_v1_0_ib_test,
.is_lockup = &radeon_ring_test_lockup,
.get_rptr = &uvd_v1_0_get_rptr,
.get_wptr = &uvd_v1_0_get_wptr,
.set_wptr = &uvd_v1_0_set_wptr,
};
 
static struct radeon_asic rv770_asic = {
.init = &rv770_init,
// .fini = &rv770_fini,
1165,7 → 1107,7
// .resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
// .vga_set_state = &r600_vga_set_state,
.mmio_hdp_flush = r600_mmio_hdp_flush,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
1175,10 → 1117,34
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &r600_dma_ring_ib_execute,
.emit_fence = &r600_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &r600_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &r600_dma_is_lockup,
},
[R600_RING_TYPE_UVD_INDEX] = {
// .ib_execute = &r600_uvd_ib_execute,
// .emit_fence = &r600_uvd_fence_emit,
// .emit_semaphore = &r600_uvd_semaphore_emit,
// .cs_parse = &radeon_uvd_cs_parse,
// .ring_test = &r600_uvd_ring_test,
// .ib_test = &r600_uvd_ib_test,
// .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
1187,13 → 1153,11
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &r600_hdmi_enable,
.hdmi_setmode = &dce3_1_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &rv770_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1205,77 → 1169,33
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &r600_hpd_init,
.fini = &r600_hpd_fini,
.sense = &r600_hpd_sense,
.set_polarity = &r600_hpd_set_polarity,
// .init = &r600_hpd_init,
// .fini = &r600_hpd_fini,
// .sense = &r600_hpd_sense,
// .set_polarity = &r600_hpd_set_polarity,
},
.pm = {
.misc = &rv770_pm_misc,
.prepare = &rs600_pm_prepare,
.finish = &rs600_pm_finish,
.init_profile = &r600_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
// .misc = &rv770_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r600_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &r600_get_pcie_lanes,
// .set_pcie_lanes = &r600_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
.set_uvd_clocks = &rv770_set_uvd_clocks,
.get_temperature = &rv770_get_temp,
},
.dpm = {
.init = &rv770_dpm_init,
.setup_asic = &rv770_dpm_setup_asic,
.enable = &rv770_dpm_enable,
.late_enable = &rv770_dpm_late_enable,
.disable = &rv770_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &rv770_dpm_set_power_state,
.post_set_power_state = &r600_dpm_post_set_power_state,
.display_configuration_changed = &rv770_dpm_display_configuration_changed,
.fini = &rv770_dpm_fini,
.get_sclk = &rv770_dpm_get_sclk,
.get_mclk = &rv770_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &rv770_dpm_vblank_too_short,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rv770_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
static struct radeon_asic_ring evergreen_gfx_ring = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gfx_is_lockup,
.get_rptr = &r600_gfx_get_rptr,
.get_wptr = &r600_gfx_get_wptr,
.set_wptr = &r600_gfx_set_wptr,
};
 
static struct radeon_asic_ring evergreen_dma_ring = {
.ib_execute = &evergreen_dma_ring_ib_execute,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
.get_rptr = &r600_dma_get_rptr,
.get_wptr = &r600_dma_get_wptr,
.set_wptr = &r600_dma_set_wptr,
};
 
static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
// .fini = &evergreen_fini,
1283,7 → 1203,7
// .resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
// .vga_set_state = &r600_vga_set_state,
.mmio_hdp_flush = r600_mmio_hdp_flush,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
1293,10 → 1213,34
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
},
[R600_RING_TYPE_UVD_INDEX] = {
// .ib_execute = &r600_uvd_ib_execute,
// .emit_fence = &r600_uvd_fence_emit,
// .emit_semaphore = &r600_uvd_semaphore_emit,
// .cs_parse = &radeon_uvd_cs_parse,
// .ring_test = &r600_uvd_ring_test,
// .ib_test = &r600_uvd_ib_test,
// .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
1305,13 → 1249,11
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1323,46 → 1265,30
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &r600_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &r600_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &r600_get_pcie_lanes,
// .set_pcie_lanes = &r600_set_pcie_lanes,
// .set_clock_gating = NULL,
.set_uvd_clocks = &evergreen_set_uvd_clocks,
.get_temperature = &evergreen_get_temp,
},
.dpm = {
.init = &cypress_dpm_init,
.setup_asic = &cypress_dpm_setup_asic,
.enable = &cypress_dpm_enable,
.late_enable = &rv770_dpm_late_enable,
.disable = &cypress_dpm_disable,
.pre_set_power_state = &r600_dpm_pre_set_power_state,
.set_power_state = &cypress_dpm_set_power_state,
.post_set_power_state = &r600_dpm_post_set_power_state,
.display_configuration_changed = &cypress_dpm_display_configuration_changed,
.fini = &cypress_dpm_fini,
.get_sclk = &rv770_dpm_get_sclk,
.get_mclk = &rv770_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &cypress_dpm_vblank_too_short,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
1373,7 → 1299,7
// .resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
// .vga_set_state = &r600_vga_set_state,
.mmio_hdp_flush = r600_mmio_hdp_flush,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
1383,10 → 1309,34
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
},
[R600_RING_TYPE_UVD_INDEX] = {
// .ib_execute = &r600_uvd_ib_execute,
// .emit_fence = &r600_uvd_fence_emit,
// .emit_semaphore = &r600_uvd_semaphore_emit,
// .cs_parse = &radeon_uvd_cs_parse,
// .ring_test = &r600_uvd_ring_test,
// .ib_test = &r600_uvd_ib_test,
// .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
1395,13 → 1345,11
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1413,19 → 1361,19
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &sumo_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &sumo_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
1432,28 → 1380,11
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &sumo_set_uvd_clocks,
.get_temperature = &sumo_get_temp,
},
.dpm = {
.init = &sumo_dpm_init,
.setup_asic = &sumo_dpm_setup_asic,
.enable = &sumo_dpm_enable,
.late_enable = &sumo_dpm_late_enable,
.disable = &sumo_dpm_disable,
.pre_set_power_state = &sumo_dpm_pre_set_power_state,
.set_power_state = &sumo_dpm_set_power_state,
.post_set_power_state = &sumo_dpm_post_set_power_state,
.display_configuration_changed = &sumo_dpm_display_configuration_changed,
.fini = &sumo_dpm_fini,
.get_sclk = &sumo_dpm_get_sclk,
.get_mclk = &sumo_dpm_get_mclk,
.print_power_state = &sumo_dpm_print_power_state,
.debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level,
.force_performance_level = &sumo_dpm_force_performance_level,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
1464,7 → 1395,7
// .resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
// .vga_set_state = &r600_vga_set_state,
.mmio_hdp_flush = r600_mmio_hdp_flush,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
1474,10 → 1405,34
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gfx_is_lockup,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &evergreen_dma_ring_ib_execute,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &evergreen_dma_is_lockup,
},
[R600_RING_TYPE_UVD_INDEX] = {
// .ib_execute = &r600_uvd_ib_execute,
// .emit_fence = &r600_uvd_fence_emit,
// .emit_semaphore = &r600_uvd_semaphore_emit,
// .cs_parse = &radeon_uvd_cs_parse,
// .ring_test = &r600_uvd_ring_test,
// .ib_test = &r600_uvd_ib_test,
// .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
1486,13 → 1441,11
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1504,94 → 1457,33
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &btc_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &btc_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &evergreen_set_uvd_clocks,
.get_temperature = &evergreen_get_temp,
},
.dpm = {
.init = &btc_dpm_init,
.setup_asic = &btc_dpm_setup_asic,
.enable = &btc_dpm_enable,
.late_enable = &rv770_dpm_late_enable,
.disable = &btc_dpm_disable,
.pre_set_power_state = &btc_dpm_pre_set_power_state,
.set_power_state = &btc_dpm_set_power_state,
.post_set_power_state = &btc_dpm_post_set_power_state,
.display_configuration_changed = &cypress_dpm_display_configuration_changed,
.fini = &btc_dpm_fini,
.get_sclk = &btc_dpm_get_sclk,
.get_mclk = &btc_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
.debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
static struct radeon_asic_ring cayman_gfx_ring = {
.ib_execute = &cayman_ring_ib_execute,
.ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
.get_rptr = &cayman_gfx_get_rptr,
.get_wptr = &cayman_gfx_get_wptr,
.set_wptr = &cayman_gfx_set_wptr,
};
 
static struct radeon_asic_ring cayman_dma_ring = {
.ib_execute = &cayman_dma_ring_ib_execute,
.ib_parse = &evergreen_dma_ib_parse,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
.cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
.get_rptr = &cayman_dma_get_rptr,
.get_wptr = &cayman_dma_get_wptr,
.set_wptr = &cayman_dma_set_wptr
};
 
static struct radeon_asic_ring cayman_uvd_ring = {
.ib_execute = &uvd_v1_0_ib_execute,
.emit_fence = &uvd_v2_2_fence_emit,
.emit_semaphore = &uvd_v3_1_semaphore_emit,
.cs_parse = &radeon_uvd_cs_parse,
.ring_test = &uvd_v1_0_ring_test,
.ib_test = &uvd_v1_0_ib_test,
.is_lockup = &radeon_ring_test_lockup,
.get_rptr = &uvd_v1_0_get_rptr,
.get_wptr = &uvd_v1_0_get_wptr,
.set_wptr = &uvd_v1_0_set_wptr,
};
 
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
// .fini = &cayman_fini,
1599,7 → 1491,7
// .resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
// .vga_set_state = &r600_vga_set_state,
.mmio_hdp_flush = r600_mmio_hdp_flush,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
1611,19 → 1503,75
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
.copy_pages = &cayman_dma_vm_copy_pages,
.write_pages = &cayman_dma_vm_write_pages,
.set_pages = &cayman_dma_vm_set_pages,
.pad_ib = &cayman_dma_vm_pad_ib,
.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
[CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
[CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
[CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &cayman_dma_ring_ib_execute,
// .ib_parse = &evergreen_dma_ib_parse,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
},
[CAYMAN_RING_TYPE_DMA1_INDEX] = {
.ib_execute = &cayman_dma_ring_ib_execute,
// .ib_parse = &evergreen_dma_ib_parse,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
},
[R600_RING_TYPE_UVD_INDEX] = {
// .ib_execute = &r600_uvd_ib_execute,
// .emit_fence = &r600_uvd_fence_emit,
// .emit_semaphore = &cayman_uvd_semaphore_emit,
// .cs_parse = &radeon_uvd_cs_parse,
// .ring_test = &r600_uvd_ring_test,
// .ib_test = &r600_uvd_ib_test,
// .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
1632,13 → 1580,11
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1650,48 → 1596,30
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &btc_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &btc_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &evergreen_set_uvd_clocks,
.get_temperature = &evergreen_get_temp,
},
.dpm = {
.init = &ni_dpm_init,
.setup_asic = &ni_dpm_setup_asic,
.enable = &ni_dpm_enable,
.late_enable = &rv770_dpm_late_enable,
.disable = &ni_dpm_disable,
.pre_set_power_state = &ni_dpm_pre_set_power_state,
.set_power_state = &ni_dpm_set_power_state,
.post_set_power_state = &ni_dpm_post_set_power_state,
.display_configuration_changed = &cypress_dpm_display_configuration_changed,
.fini = &ni_dpm_fini,
.get_sclk = &ni_dpm_get_sclk,
.get_mclk = &ni_dpm_get_mclk,
.print_power_state = &ni_dpm_print_power_state,
.debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level,
.force_performance_level = &ni_dpm_force_performance_level,
.vblank_too_short = &ni_dpm_vblank_too_short,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
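In the .vm blocks of the two Cayman tables above, the newer revision replaces the single set_page callback (plus pt_ring_index) with specialized copy_pages/write_pages/set_pages/pad_ib hooks. A hedged sketch of why a caller might want that split; the names and the selection policy here are invented, not the driver's:

/* Sketch of splitting one set_page hook into specialized page-table
 * ops, as the newer .vm blocks do; invented names throughout. */
#include <stdint.h>

struct demo_vm_ops {
    /* copy PTEs over from the GART table */
    void (*copy_pages)(void *ib, uint64_t pe, uint64_t src, unsigned n);
    /* write literal PTE values, one by one */
    void (*write_pages)(void *ib, uint64_t pe, uint64_t addr, unsigned n);
    /* fill a contiguous run with a stride in a single packet */
    void (*set_pages)(void *ib, uint64_t pe, uint64_t addr, unsigned n);
    /* pad the command buffer to the engine's alignment */
    void (*pad_ib)(void *ib);
};

/* A caller can now pick the cheapest encoding per update: */
static void demo_update_ptes(const struct demo_vm_ops *ops, void *ib,
                             uint64_t pe, uint64_t addr,
                             unsigned count, int contiguous)
{
    if (contiguous)
        ops->set_pages(ib, pe, addr, count);    /* one packet */
    else
        ops->write_pages(ib, pe, addr, count);  /* literal values */
    ops->pad_ib(ib);
}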
 
1702,7 → 1630,7
// .resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
// .vga_set_state = &r600_vga_set_state,
.mmio_hdp_flush = r600_mmio_hdp_flush,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
1714,19 → 1642,75
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
.copy_pages = &cayman_dma_vm_copy_pages,
.write_pages = &cayman_dma_vm_write_pages,
.set_pages = &cayman_dma_vm_set_pages,
.pad_ib = &cayman_dma_vm_pad_ib,
.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
[CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
[CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
[CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &cayman_gfx_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &cayman_dma_ring_ib_execute,
// .ib_parse = &evergreen_dma_ib_parse,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
},
[CAYMAN_RING_TYPE_DMA1_INDEX] = {
.ib_execute = &cayman_dma_ring_ib_execute,
// .ib_parse = &evergreen_dma_ib_parse,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
// .cs_parse = &evergreen_dma_cs_parse,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &cayman_dma_is_lockup,
.vm_flush = &cayman_dma_vm_flush,
},
[R600_RING_TYPE_UVD_INDEX] = {
// .ib_execute = &r600_uvd_ib_execute,
// .emit_fence = &r600_uvd_fence_emit,
// .emit_semaphore = &cayman_uvd_semaphore_emit,
// .cs_parse = &radeon_uvd_cs_parse,
// .ring_test = &r600_uvd_ring_test,
// .ib_test = &r600_uvd_ib_test,
// .is_lockup = &radeon_ring_test_lockup,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
1735,13 → 1719,11
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1753,19 → 1735,19
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &sumo_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &sumo_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
1772,62 → 1754,14
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &sumo_set_uvd_clocks,
.get_temperature = &tn_get_temp,
},
.dpm = {
.init = &trinity_dpm_init,
.setup_asic = &trinity_dpm_setup_asic,
.enable = &trinity_dpm_enable,
.late_enable = &trinity_dpm_late_enable,
.disable = &trinity_dpm_disable,
.pre_set_power_state = &trinity_dpm_pre_set_power_state,
.set_power_state = &trinity_dpm_set_power_state,
.post_set_power_state = &trinity_dpm_post_set_power_state,
.display_configuration_changed = &trinity_dpm_display_configuration_changed,
.fini = &trinity_dpm_fini,
.get_sclk = &trinity_dpm_get_sclk,
.get_mclk = &trinity_dpm_get_mclk,
.print_power_state = &trinity_dpm_print_power_state,
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
.force_performance_level = &trinity_dpm_force_performance_level,
.enable_bapm = &trinity_dpm_enable_bapm,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
static struct radeon_asic_ring si_gfx_ring = {
.ib_execute = &si_ring_ib_execute,
.ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
.get_rptr = &cayman_gfx_get_rptr,
.get_wptr = &cayman_gfx_get_wptr,
.set_wptr = &cayman_gfx_set_wptr,
};
 
static struct radeon_asic_ring si_dma_ring = {
.ib_execute = &cayman_dma_ring_ib_execute,
.ib_parse = &evergreen_dma_ib_parse,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
.get_rptr = &cayman_dma_get_rptr,
.get_wptr = &cayman_dma_get_wptr,
.set_wptr = &cayman_dma_set_wptr,
};
 
static struct radeon_asic si_asic = {
.init = &si_init,
// .fini = &si_fini,
1835,7 → 1769,7
// .resume = &si_resume,
.asic_reset = &si_asic_reset,
// .vga_set_state = &r600_vga_set_state,
.mmio_hdp_flush = r600_mmio_hdp_flush,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &si_get_xclk,
1847,201 → 1781,92
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
.copy_pages = &si_dma_vm_copy_pages,
.write_pages = &si_dma_vm_write_pages,
.set_pages = &si_dma_vm_set_pages,
.pad_ib = &cayman_dma_vm_pad_ib,
.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
[CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring,
[CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring,
[R600_RING_TYPE_DMA_INDEX] = &si_dma_ring,
[CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &si_ring_ib_execute,
// .ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
.irq = {
.set = &si_irq_set,
.process = &si_irq_process,
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &si_ring_ib_execute,
// .ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
.display = {
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &si_ring_ib_execute,
// .ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gfx_is_lockup,
.vm_flush = &si_vm_flush,
},
.copy = {
.blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &si_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
.copy = &si_copy_dma,
.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
[R600_RING_TYPE_DMA_INDEX] = {
.ib_execute = &cayman_dma_ring_ib_execute,
// .ib_parse = &evergreen_dma_ib_parse,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &sumo_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.set_uvd_clocks = &si_set_uvd_clocks,
.get_temperature = &si_get_temp,
},
.dpm = {
.init = &si_dpm_init,
.setup_asic = &si_dpm_setup_asic,
.enable = &si_dpm_enable,
.late_enable = &si_dpm_late_enable,
.disable = &si_dpm_disable,
.pre_set_power_state = &si_dpm_pre_set_power_state,
.set_power_state = &si_dpm_set_power_state,
.post_set_power_state = &si_dpm_post_set_power_state,
.display_configuration_changed = &si_dpm_display_configuration_changed,
.fini = &si_dpm_fini,
.get_sclk = &ni_dpm_get_sclk,
.get_mclk = &ni_dpm_get_mclk,
.print_power_state = &ni_dpm_print_power_state,
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
.force_performance_level = &si_dpm_force_performance_level,
.vblank_too_short = &ni_dpm_vblank_too_short,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
},
};
 
static struct radeon_asic_ring ci_gfx_ring = {
.ib_execute = &cik_ring_ib_execute,
.ib_parse = &cik_ib_parse,
.emit_fence = &cik_fence_gfx_ring_emit,
.emit_semaphore = &cik_semaphore_ring_emit,
[CAYMAN_RING_TYPE_DMA1_INDEX] = {
.ib_execute = &cayman_dma_ring_ib_execute,
// .ib_parse = &evergreen_dma_ib_parse,
.emit_fence = &evergreen_dma_fence_ring_emit,
.emit_semaphore = &r600_dma_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &cik_ring_test,
.ib_test = &cik_ib_test,
.is_lockup = &cik_gfx_is_lockup,
.vm_flush = &cik_vm_flush,
.get_rptr = &cik_gfx_get_rptr,
.get_wptr = &cik_gfx_get_wptr,
.set_wptr = &cik_gfx_set_wptr,
};
 
static struct radeon_asic_ring ci_cp_ring = {
.ib_execute = &cik_ring_ib_execute,
.ib_parse = &cik_ib_parse,
.emit_fence = &cik_fence_compute_ring_emit,
.emit_semaphore = &cik_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &cik_ring_test,
.ib_test = &cik_ib_test,
.is_lockup = &cik_gfx_is_lockup,
.vm_flush = &cik_vm_flush,
.get_rptr = &cik_compute_get_rptr,
.get_wptr = &cik_compute_get_wptr,
.set_wptr = &cik_compute_set_wptr,
};
 
static struct radeon_asic_ring ci_dma_ring = {
.ib_execute = &cik_sdma_ring_ib_execute,
.ib_parse = &cik_ib_parse,
.emit_fence = &cik_sdma_fence_ring_emit,
.emit_semaphore = &cik_sdma_semaphore_ring_emit,
.cs_parse = NULL,
.ring_test = &cik_sdma_ring_test,
.ib_test = &cik_sdma_ib_test,
.is_lockup = &cik_sdma_is_lockup,
.vm_flush = &cik_dma_vm_flush,
.get_rptr = &cik_sdma_get_rptr,
.get_wptr = &cik_sdma_get_wptr,
.set_wptr = &cik_sdma_set_wptr,
};
 
static struct radeon_asic_ring ci_vce_ring = {
.ib_execute = &radeon_vce_ib_execute,
.emit_fence = &radeon_vce_fence_emit,
.emit_semaphore = &radeon_vce_semaphore_emit,
.cs_parse = &radeon_vce_cs_parse,
.ring_test = &radeon_vce_ring_test,
.ib_test = &radeon_vce_ib_test,
.is_lockup = &radeon_ring_test_lockup,
.get_rptr = &vce_v1_0_get_rptr,
.get_wptr = &vce_v1_0_get_wptr,
.set_wptr = &vce_v1_0_set_wptr,
};
 
static struct radeon_asic ci_asic = {
.init = &cik_init,
// .fini = &si_fini,
// .suspend = &si_suspend,
// .resume = &si_resume,
.asic_reset = &cik_asic_reset,
// .vga_set_state = &r600_vga_set_state,
.mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
.ring_test = &r600_dma_ring_test,
.ib_test = &r600_dma_ib_test,
.is_lockup = &si_dma_is_lockup,
.vm_flush = &si_dma_vm_flush,
},
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
.copy_pages = &cik_sdma_vm_copy_pages,
.write_pages = &cik_sdma_vm_write_pages,
.set_pages = &cik_sdma_vm_set_pages,
.pad_ib = &cik_sdma_vm_pad_ib,
[R600_RING_TYPE_UVD_INDEX] = {
// .ib_execute = &r600_uvd_ib_execute,
// .emit_fence = &r600_uvd_fence_emit,
// .emit_semaphore = &cayman_uvd_semaphore_emit,
// .cs_parse = &radeon_uvd_cs_parse,
// .ring_test = &r600_uvd_ring_test,
// .ib_test = &r600_uvd_ib_test,
// .is_lockup = &radeon_ring_test_lockup,
}
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
[CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
[CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
[TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
[TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
},
.irq = {
.set = &cik_irq_set,
.process = &cik_irq_process,
.set = &si_irq_set,
.process = &si_irq_process,
},
.display = {
.bandwidth_update = &dce8_bandwidth_update,
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &cik_copy_cpdma,
.blit = NULL,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma = &si_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
.copy = &cik_copy_dma,
.copy = &si_copy_dma,
.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
2055,154 → 1880,27
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &sumo_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &sumo_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
.get_temperature = &ci_get_temp,
// .set_uvd_clocks = &si_set_uvd_clocks,
},
.dpm = {
.init = &ci_dpm_init,
.setup_asic = &ci_dpm_setup_asic,
.enable = &ci_dpm_enable,
.late_enable = &ci_dpm_late_enable,
.disable = &ci_dpm_disable,
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
.post_set_power_state = &ci_dpm_post_set_power_state,
.display_configuration_changed = &ci_dpm_display_configuration_changed,
.fini = &ci_dpm_fini,
.get_sclk = &ci_dpm_get_sclk,
.get_mclk = &ci_dpm_get_mclk,
.print_power_state = &ci_dpm_print_power_state,
.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
.force_performance_level = &ci_dpm_force_performance_level,
.vblank_too_short = &ci_dpm_vblank_too_short,
.powergate_uvd = &ci_dpm_powergate_uvd,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
static struct radeon_asic kv_asic = {
.init = &cik_init,
// .fini = &si_fini,
// .suspend = &si_suspend,
// .resume = &si_resume,
.asic_reset = &cik_asic_reset,
// .vga_set_state = &r600_vga_set_state,
.mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
.copy_pages = &cik_sdma_vm_copy_pages,
.write_pages = &cik_sdma_vm_write_pages,
.set_pages = &cik_sdma_vm_set_pages,
.pad_ib = &cik_sdma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
[CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
[CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
[R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
[CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
[TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
[TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
},
.irq = {
.set = &cik_irq_set,
.process = &cik_irq_process,
},
.display = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
.hdmi_enable = &evergreen_hdmi_enable,
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = &cik_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
.copy = &cik_copy_dma,
.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
.misc = &evergreen_pm_misc,
.prepare = &evergreen_pm_prepare,
.finish = &evergreen_pm_finish,
.init_profile = &sumo_pm_init_profile,
.get_dynpm_state = &r600_pm_get_dynpm_state,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
.get_temperature = &kv_get_temp,
},
.dpm = {
.init = &kv_dpm_init,
.setup_asic = &kv_dpm_setup_asic,
.enable = &kv_dpm_enable,
.late_enable = &kv_dpm_late_enable,
.disable = &kv_dpm_disable,
.pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state,
.post_set_power_state = &kv_dpm_post_set_power_state,
.display_configuration_changed = &kv_dpm_display_configuration_changed,
.fini = &kv_dpm_fini,
.get_sclk = &kv_dpm_get_sclk,
.get_mclk = &kv_dpm_get_mclk,
.print_power_state = &kv_dpm_print_power_state,
.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
.force_performance_level = &kv_dpm_force_performance_level,
.powergate_uvd = &kv_dpm_powergate_uvd,
.enable_bapm = &kv_dpm_enable_bapm,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
},
};
 
/**
* radeon_asic_init - register asic specific callbacks
*
2283,14 → 1981,15
rdev->asic = &r520_asic;
break;
case CHIP_R600:
rdev->asic = &r600_asic;
break;
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV620:
case CHIP_RV635:
case CHIP_RV670:
rdev->asic = &rv6xx_asic;
rdev->asic = &r600_asic;
if (rdev->family == CHIP_R600)
rdev->has_uvd = false;
else
rdev->has_uvd = true;
break;
case CHIP_RS780:
2364,212 → 2063,8
rdev->has_uvd = false;
else
rdev->has_uvd = true;
switch (rdev->family) {
case CHIP_TAHITI:
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
RADEON_CG_SUPPORT_MC_MGCG |
RADEON_CG_SUPPORT_SDMA_MGCG |
RADEON_CG_SUPPORT_BIF_LS |
RADEON_CG_SUPPORT_VCE_MGCG |
RADEON_CG_SUPPORT_UVD_MGCG |
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
break;
case CHIP_PITCAIRN:
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
RADEON_CG_SUPPORT_GFX_RLC_LS |
RADEON_CG_SUPPORT_MC_LS |
RADEON_CG_SUPPORT_MC_MGCG |
RADEON_CG_SUPPORT_SDMA_MGCG |
RADEON_CG_SUPPORT_BIF_LS |
RADEON_CG_SUPPORT_VCE_MGCG |
RADEON_CG_SUPPORT_UVD_MGCG |
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
break;
case CHIP_VERDE:
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
RADEON_CG_SUPPORT_GFX_RLC_LS |
RADEON_CG_SUPPORT_MC_LS |
RADEON_CG_SUPPORT_MC_MGCG |
RADEON_CG_SUPPORT_SDMA_MGCG |
RADEON_CG_SUPPORT_BIF_LS |
RADEON_CG_SUPPORT_VCE_MGCG |
RADEON_CG_SUPPORT_UVD_MGCG |
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0 |
/*RADEON_PG_SUPPORT_GFX_PG | */
RADEON_PG_SUPPORT_SDMA;
break;
case CHIP_OLAND:
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
RADEON_CG_SUPPORT_GFX_RLC_LS |
RADEON_CG_SUPPORT_MC_LS |
RADEON_CG_SUPPORT_MC_MGCG |
RADEON_CG_SUPPORT_SDMA_MGCG |
RADEON_CG_SUPPORT_BIF_LS |
RADEON_CG_SUPPORT_UVD_MGCG |
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
break;
case CHIP_HAINAN:
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
RADEON_CG_SUPPORT_GFX_RLC_LS |
RADEON_CG_SUPPORT_MC_LS |
RADEON_CG_SUPPORT_MC_MGCG |
RADEON_CG_SUPPORT_SDMA_MGCG |
RADEON_CG_SUPPORT_BIF_LS |
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
break;
default:
rdev->cg_flags = 0;
rdev->pg_flags = 0;
break;
}
break;
case CHIP_BONAIRE:
case CHIP_HAWAII:
rdev->asic = &ci_asic;
rdev->num_crtc = 6;
rdev->has_uvd = true;
if (rdev->family == CHIP_BONAIRE) {
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
RADEON_CG_SUPPORT_GFX_CP_LS |
RADEON_CG_SUPPORT_MC_LS |
RADEON_CG_SUPPORT_MC_MGCG |
RADEON_CG_SUPPORT_SDMA_MGCG |
RADEON_CG_SUPPORT_SDMA_LS |
RADEON_CG_SUPPORT_BIF_LS |
RADEON_CG_SUPPORT_VCE_MGCG |
RADEON_CG_SUPPORT_UVD_MGCG |
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
} else {
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
RADEON_CG_SUPPORT_MC_LS |
RADEON_CG_SUPPORT_MC_MGCG |
RADEON_CG_SUPPORT_SDMA_MGCG |
RADEON_CG_SUPPORT_SDMA_LS |
RADEON_CG_SUPPORT_BIF_LS |
RADEON_CG_SUPPORT_VCE_MGCG |
RADEON_CG_SUPPORT_UVD_MGCG |
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
}
break;
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
rdev->asic = &kv_asic;
/* set num crtcs */
if (rdev->family == CHIP_KAVERI) {
rdev->num_crtc = 4;
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
RADEON_CG_SUPPORT_GFX_CP_LS |
RADEON_CG_SUPPORT_SDMA_MGCG |
RADEON_CG_SUPPORT_SDMA_LS |
RADEON_CG_SUPPORT_BIF_LS |
RADEON_CG_SUPPORT_VCE_MGCG |
RADEON_CG_SUPPORT_UVD_MGCG |
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
/*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_GFX_DMG |
RADEON_PG_SUPPORT_UVD |
RADEON_PG_SUPPORT_VCE |
RADEON_PG_SUPPORT_CP |
RADEON_PG_SUPPORT_GDS |
RADEON_PG_SUPPORT_RLC_SMU_HS |
RADEON_PG_SUPPORT_ACP |
RADEON_PG_SUPPORT_SAMU;*/
} else {
rdev->num_crtc = 2;
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
RADEON_CG_SUPPORT_GFX_CP_LS |
RADEON_CG_SUPPORT_SDMA_MGCG |
RADEON_CG_SUPPORT_SDMA_LS |
RADEON_CG_SUPPORT_BIF_LS |
RADEON_CG_SUPPORT_VCE_MGCG |
RADEON_CG_SUPPORT_UVD_MGCG |
RADEON_CG_SUPPORT_HDP_LS |
RADEON_CG_SUPPORT_HDP_MGCG;
rdev->pg_flags = 0;
/*RADEON_PG_SUPPORT_GFX_PG |
RADEON_PG_SUPPORT_GFX_SMG |
RADEON_PG_SUPPORT_UVD |
RADEON_PG_SUPPORT_VCE |
RADEON_PG_SUPPORT_CP |
RADEON_PG_SUPPORT_GDS |
RADEON_PG_SUPPORT_RLC_SMU_HS |
RADEON_PG_SUPPORT_SAMU;*/
}
rdev->has_uvd = true;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
}
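The switch above is the whole selection mechanism: radeon_asic_init binds one static table per rdev->family and sets per-family capability flags (has_uvd, num_crtc, and in the newer revision cg_flags/pg_flags) alongside it. A compressed sketch of that shape; the enum values, flag handling, and table names are invented for illustration:

/* Compressed sketch of family-based table selection; invented names. */
#include <errno.h>

struct demo_dev;                        /* fwd decl for the callback */

struct demo_asic_tbl {
    int (*init)(struct demo_dev *d);
};

enum demo_family { DEMO_R600, DEMO_RV670, DEMO_RV770 };

struct demo_dev {
    enum demo_family family;
    const struct demo_asic_tbl *asic;
    int has_uvd;
    unsigned num_crtc;
};

static int demo_r6xx_init(struct demo_dev *d) { (void)d; return 0; }

static const struct demo_asic_tbl demo_r600_tbl = {
    .init = &demo_r6xx_init,
};

static int demo_asic_init(struct demo_dev *d)
{
    switch (d->family) {
    case DEMO_R600:
    case DEMO_RV670:
        d->asic = &demo_r600_tbl;
        /* capability flags ride along with the table choice */
        d->has_uvd  = (d->family != DEMO_R600);
        d->num_crtc = 2;
        break;
    default:
        return -EINVAL;                 /* unsupported family */
    }
    return 0;
}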
/drivers/video/drm/radeon/radeon_asic.h
47,6 → 47,7
void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
 
 
/*
* r100,rv100,rs100,rv200,rs200
*/
67,14 → 68,13
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
bool r100_semaphore_ring_emit(struct radeon_device *rdev,
void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
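The prototype pair above records an API change that recurs through this header: *_semaphore_ring_emit now returns bool instead of void, so a caller can notice that a ring could not emit the semaphore and fall back to another synchronization path. A small sketch of that calling pattern; demo_fence_wait and the ring numbering are stand-ins, not real radeon functions:

/* Sketch of the void -> bool change for semaphore emission;
 * all demo_* names are invented. */
#include <stdbool.h>
#include <stdio.h>

static bool demo_semaphore_emit(int ring, bool emit_wait)
{
    (void)emit_wait;
    return ring != 2;            /* pretend ring 2 lacks semaphores */
}

static void demo_fence_wait(int ring)
{
    printf("CPU-side wait on ring %d fence\n", ring);
}

static void demo_sync_rings(int waiter, int signaller)
{
    /* try the cheap hardware path first; fall back if it declines */
    if (!demo_semaphore_emit(waiter, true))
        demo_fence_wait(signaller);
}

int main(void)
{
    demo_sync_rings(2, 0);
    return 0;
}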
136,20 → 136,12
extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r100_page_flip(struct radeon_device *rdev, int crtc,
u64 crtc_base);
extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc);
extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
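[This hunk replaces the older three-phase flip interface (pre_page_flip, a u32-returning page_flip, post_page_flip) with a void page_flip plus a page_flip_pending predicate the caller polls; the same split recurs below for rs600, rv770 and evergreen. A toy model of the newer polling style, with a fake latency counter standing in for the hardware latch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t scanout_base;
    static int latch_delay = 3;                  /* fake hardware latch latency */

    static void page_flip(uint64_t crtc_base)    /* program the new scanout base */
    {
        scanout_base = crtc_base;
    }

    static bool page_flip_pending(void)          /* latched by the hardware yet? */
    {
        return latch_delay-- > 0;
    }

    int main(void)
    {
        page_flip(0xD0000000u);
        while (page_flip_pending())
            puts("flip pending, waiting for vblank...");
        printf("scanout now at 0x%llx\n", (unsigned long long)scanout_base);
        return 0;
    }
]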
 
u32 r100_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
u32 r100_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r100_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r100_ring_hdp_flush(struct radeon_device *rdev,
struct radeon_ring *ring);
/*
* r200,rv250,rs300,rv280
*/
173,8 → 165,7
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
209,8 → 200,7
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
233,8 → 223,7
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
246,9 → 235,9
extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev);
extern void rs600_page_flip(struct radeon_device *rdev, int crtc,
u64 crtc_base);
extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc);
extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
318,13 → 307,13
int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
341,7 → 330,8
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_cpdma(struct radeon_device *rdev,
int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
int r600_copy_dma(struct radeon_device *rdev,
352,7 → 342,7
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_mmio_hdp_flush(struct radeon_device *rdev);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
372,13 → 362,9
int r600_mc_wait_for_idle(struct radeon_device *rdev);
int r600_pcie_gart_init(struct radeon_device *rdev);
void r600_scratch_init(struct radeon_device *rdev);
int r600_blit_init(struct radeon_device *rdev);
void r600_blit_fini(struct radeon_device *rdev);
int r600_init_microcode(struct radeon_device *rdev);
u32 r600_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
u32 r600_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r600_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_init(struct radeon_device *rdev);
390,65 → 376,39
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
struct r600_audio r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
size_t size);
void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
struct radeon_fence **fence, struct radeon_sa_bo **vb,
struct radeon_semaphore **sem);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
int rv6xx_get_temp(struct radeon_device *rdev);
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev);
int r600_dpm_late_enable(struct radeon_device *rdev);
/* r600 dma */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r600_dma_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
/* rv6xx dpm */
int rv6xx_dpm_init(struct radeon_device *rdev);
int rv6xx_dpm_enable(struct radeon_device *rdev);
void rv6xx_dpm_disable(struct radeon_device *rdev);
int rv6xx_dpm_set_power_state(struct radeon_device *rdev);
void rv6xx_setup_asic(struct radeon_device *rdev);
void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev);
void rv6xx_dpm_fini(struct radeon_device *rdev);
u32 rv6xx_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low);
void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
/* rs780 dpm */
int rs780_dpm_init(struct radeon_device *rdev);
int rs780_dpm_enable(struct radeon_device *rdev);
void rs780_dpm_disable(struct radeon_device *rdev);
int rs780_dpm_set_power_state(struct radeon_device *rdev);
void rs780_dpm_setup_asic(struct radeon_device *rdev);
void rs780_dpm_display_configuration_changed(struct radeon_device *rdev);
void rs780_dpm_fini(struct radeon_device *rdev);
u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low);
void rs780_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int rs780_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
 
/* uvd */
int r600_uvd_init(struct radeon_device *rdev);
int r600_uvd_rbc_start(struct radeon_device *rdev);
void r600_uvd_rbc_stop(struct radeon_device *rdev);
int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_uvd_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_uvd_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 
/*
* rv770,rv730,rv710,rv740
*/
457,8 → 417,7
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
void rv770_pm_misc(struct radeon_device *rdev);
void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
467,28 → 426,8
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 rv770_get_xclk(struct radeon_device *rdev);
int rv770_uvd_resume(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
/* hdmi */
void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
/* rv7xx pm */
int rv770_dpm_init(struct radeon_device *rdev);
int rv770_dpm_enable(struct radeon_device *rdev);
int rv770_dpm_late_enable(struct radeon_device *rdev);
void rv770_dpm_disable(struct radeon_device *rdev);
int rv770_dpm_set_power_state(struct radeon_device *rdev);
void rv770_dpm_setup_asic(struct radeon_device *rdev);
void rv770_dpm_display_configuration_changed(struct radeon_device *rdev);
void rv770_dpm_fini(struct radeon_device *rdev);
u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low);
void rv770_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int rv770_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev);
 
/*
* evergreen
526,11 → 465,12
extern void btc_pm_init_profile(struct radeon_device *rdev);
int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
extern void evergreen_page_flip(struct radeon_device *rdev, int crtc,
u64 crtc_base);
extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
542,48 → 482,6
struct radeon_fence **fence);
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
int evergreen_get_temp(struct radeon_device *rdev);
int sumo_get_temp(struct radeon_device *rdev);
int tn_get_temp(struct radeon_device *rdev);
int cypress_dpm_init(struct radeon_device *rdev);
void cypress_dpm_setup_asic(struct radeon_device *rdev);
int cypress_dpm_enable(struct radeon_device *rdev);
void cypress_dpm_disable(struct radeon_device *rdev);
int cypress_dpm_set_power_state(struct radeon_device *rdev);
void cypress_dpm_display_configuration_changed(struct radeon_device *rdev);
void cypress_dpm_fini(struct radeon_device *rdev);
bool cypress_dpm_vblank_too_short(struct radeon_device *rdev);
int btc_dpm_init(struct radeon_device *rdev);
void btc_dpm_setup_asic(struct radeon_device *rdev);
int btc_dpm_enable(struct radeon_device *rdev);
void btc_dpm_disable(struct radeon_device *rdev);
int btc_dpm_pre_set_power_state(struct radeon_device *rdev);
int btc_dpm_set_power_state(struct radeon_device *rdev);
void btc_dpm_post_set_power_state(struct radeon_device *rdev);
void btc_dpm_fini(struct radeon_device *rdev);
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
int sumo_dpm_late_enable(struct radeon_device *rdev);
void sumo_dpm_disable(struct radeon_device *rdev);
int sumo_dpm_pre_set_power_state(struct radeon_device *rdev);
int sumo_dpm_set_power_state(struct radeon_device *rdev);
void sumo_dpm_post_set_power_state(struct radeon_device *rdev);
void sumo_dpm_setup_asic(struct radeon_device *rdev);
void sumo_dpm_display_configuration_changed(struct radeon_device *rdev);
void sumo_dpm_fini(struct radeon_device *rdev);
u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low);
void sumo_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int sumo_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
 
/*
* cayman
590,6 → 488,10
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
601,6 → 503,11
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
void cayman_vm_set_page(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
607,79 → 514,10
struct radeon_ib *ib);
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 
void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
void cayman_dma_vm_write_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void cayman_dma_vm_set_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
 
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cayman_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cayman_dma_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
 
int ni_dpm_init(struct radeon_device *rdev);
void ni_dpm_setup_asic(struct radeon_device *rdev);
int ni_dpm_enable(struct radeon_device *rdev);
void ni_dpm_disable(struct radeon_device *rdev);
int ni_dpm_pre_set_power_state(struct radeon_device *rdev);
int ni_dpm_set_power_state(struct radeon_device *rdev);
void ni_dpm_post_set_power_state(struct radeon_device *rdev);
void ni_dpm_fini(struct radeon_device *rdev);
u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low);
void ni_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int ni_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
int trinity_dpm_init(struct radeon_device *rdev);
int trinity_dpm_enable(struct radeon_device *rdev);
int trinity_dpm_late_enable(struct radeon_device *rdev);
void trinity_dpm_disable(struct radeon_device *rdev);
int trinity_dpm_pre_set_power_state(struct radeon_device *rdev);
int trinity_dpm_set_power_state(struct radeon_device *rdev);
void trinity_dpm_post_set_power_state(struct radeon_device *rdev);
void trinity_dpm_setup_asic(struct radeon_device *rdev);
void trinity_dpm_display_configuration_changed(struct radeon_device *rdev);
void trinity_dpm_fini(struct radeon_device *rdev);
u32 trinity_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 trinity_dpm_get_mclk(struct radeon_device *rdev, bool low);
void trinity_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int trinity_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
 
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
int dce6_audio_init(struct radeon_device *rdev);
void dce6_audio_fini(struct radeon_device *rdev);
 
/*
* si
699,6 → 537,11
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
void si_vm_set_page(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int si_copy_dma(struct radeon_device *rdev,
705,223 → 548,9
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
 
void si_dma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
void si_dma_vm_write_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void si_dma_vm_set_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
 
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int si_get_temp(struct radeon_device *rdev);
int si_dpm_init(struct radeon_device *rdev);
void si_dpm_setup_asic(struct radeon_device *rdev);
int si_dpm_enable(struct radeon_device *rdev);
int si_dpm_late_enable(struct radeon_device *rdev);
void si_dpm_disable(struct radeon_device *rdev);
int si_dpm_pre_set_power_state(struct radeon_device *rdev);
int si_dpm_set_power_state(struct radeon_device *rdev);
void si_dpm_post_set_power_state(struct radeon_device *rdev);
void si_dpm_fini(struct radeon_device *rdev);
void si_dpm_display_configuration_changed(struct radeon_device *rdev);
void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int si_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
 
/* DCE8 - CIK */
void dce8_bandwidth_update(struct radeon_device *rdev);
 
/*
* cik
*/
uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev);
u32 cik_get_xclk(struct radeon_device *rdev);
uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cik_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
int cik_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_fence_compute_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
void cik_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cik_init(struct radeon_device *rdev);
void cik_fini(struct radeon_device *rdev);
int cik_suspend(struct radeon_device *rdev);
int cik_resume(struct radeon_device *rdev);
bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int cik_asic_reset(struct radeon_device *rdev);
void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_irq_set(struct radeon_device *rdev);
int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
void cik_sdma_vm_write_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void cik_sdma_vm_set_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
 
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
u32 cik_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cik_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
u32 cik_compute_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
u32 cik_compute_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cik_compute_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
u32 cik_sdma_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
u32 cik_sdma_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cik_sdma_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
int ci_get_temp(struct radeon_device *rdev);
int kv_get_temp(struct radeon_device *rdev);
 
int ci_dpm_init(struct radeon_device *rdev);
int ci_dpm_enable(struct radeon_device *rdev);
int ci_dpm_late_enable(struct radeon_device *rdev);
void ci_dpm_disable(struct radeon_device *rdev);
int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
int ci_dpm_set_power_state(struct radeon_device *rdev);
void ci_dpm_post_set_power_state(struct radeon_device *rdev);
void ci_dpm_setup_asic(struct radeon_device *rdev);
void ci_dpm_display_configuration_changed(struct radeon_device *rdev);
void ci_dpm_fini(struct radeon_device *rdev);
u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low);
void ci_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int ci_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
 
int kv_dpm_init(struct radeon_device *rdev);
int kv_dpm_enable(struct radeon_device *rdev);
int kv_dpm_late_enable(struct radeon_device *rdev);
void kv_dpm_disable(struct radeon_device *rdev);
int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
int kv_dpm_set_power_state(struct radeon_device *rdev);
void kv_dpm_post_set_power_state(struct radeon_device *rdev);
void kv_dpm_setup_asic(struct radeon_device *rdev);
void kv_dpm_display_configuration_changed(struct radeon_device *rdev);
void kv_dpm_fini(struct radeon_device *rdev);
u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low);
void kv_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int kv_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
 
/* uvd v1.0 */
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void uvd_v1_0_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
 
int uvd_v1_0_init(struct radeon_device *rdev);
void uvd_v1_0_fini(struct radeon_device *rdev);
int uvd_v1_0_start(struct radeon_device *rdev);
void uvd_v1_0_stop(struct radeon_device *rdev);
 
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 
/* uvd v2.2 */
int uvd_v2_2_resume(struct radeon_device *rdev);
void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
 
/* uvd v3.1 */
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
 
/* uvd v4.2 */
int uvd_v4_2_resume(struct radeon_device *rdev);
 
/* vce v1.0 */
uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void vce_v1_0_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
int vce_v1_0_init(struct radeon_device *rdev);
int vce_v1_0_start(struct radeon_device *rdev);
 
/* vce v2.0 */
int vce_v2_0_resume(struct radeon_device *rdev);
 
#endif
/drivers/video/drm/radeon/radeon_atombios.c
30,15 → 30,36
#include "atom.h"
#include "atom-bits.h"
 
/* from radeon_encoder.c */
extern uint32_t
radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device, u16 caps);
 
/* from radeon_connector.c */
extern void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd,
struct radeon_router *router);
 
/* from radeon_legacy_encoder.c */
extern void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
 
/* local */
static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
u16 voltage_id, u16 *voltage);
 
union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO info;
struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
146,8 → 167,8
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
 
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
 
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
155,8 → 176,6
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
 
180,8 → 199,9
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
 
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
 
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
190,8 → 210,6
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
}
216,8 → 234,8
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
 
pin = gpio_info->asGPIO_Pin;
for (i = 0; i < num_indices; i++) {
pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
225,8 → 243,6
gpio.valid = true;
break;
}
pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
}
}
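[The three lookup loops above (two I2C GPIO walks and the GPIO pin walk) are rewritten between revisions: the newer side initializes a record pointer once and advances it by an explicit sizeof at the end of each iteration instead of indexing asGPIO_Info[i]. One plausible motivation is to stride by the table's on-disk record size rather than whatever padded size the compiler gives the array element. A self-contained toy of that byte-pointer walk over packed records; the 3-byte layout is an assumption for illustration, and the field read assumes a little-endian host:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Byte-packed records, 3 bytes each: id, value (little-endian u16). */
        const uint8_t blob[] = { 1, 0x10, 0x00,  2, 0x20, 0x00,  3, 0x30, 0x00 };
        const unsigned rec_size = 3;           /* on-disk record size */
        unsigned num = sizeof(blob) / rec_size;
        const uint8_t *p = blob;               /* init once, before the loop */

        for (unsigned i = 0; i < num; i++) {
            uint16_t value;
            memcpy(&value, p + 1, 2);          /* unaligned-safe field read */
            printf("rec %u: id=%u value=0x%x\n", i, p[0], value);
            p += rec_size;  /* step by the table's size, not sizeof(struct) */
        }
        return 0;
    }
]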
 
699,16 → 715,13
(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
(ctx->bios + data_offset +
le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
u8 *num_dst_objs = (u8 *)
((u8 *)router_src_dst_table + 1 +
(router_src_dst_table->ucNumberOfSrc * 2));
u16 *dst_objs = (u16 *)(num_dst_objs + 1);
int enum_id;
 
router.router_id = router_obj_id;
for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
enum_id++) {
if (le16_to_cpu(path->usConnObjectId) ==
le16_to_cpu(dst_objs[enum_id]))
le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
break;
}
 
1227,22 → 1240,13
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
if (rdev->clock.default_dispclk == 0) {
if (ASIC_IS_DCE6(rdev))
rdev->clock.default_dispclk = 60000; /* 600 Mhz */
else if (ASIC_IS_DCE5(rdev))
if (ASIC_IS_DCE5(rdev))
rdev->clock.default_dispclk = 54000; /* 540 Mhz */
else
rdev->clock.default_dispclk = 60000; /* 600 Mhz */
}
/* set a reasonable default for DP */
if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) {
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
rdev->clock.default_dispclk / 100);
rdev->clock.default_dispclk = 60000;
}
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
rdev->clock.current_dispclk = rdev->clock.default_dispclk;
}
*dcpll = *p1pll;
 
1265,7 → 1269,6
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};
 
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
1358,7 → 1361,6
int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
uint16_t data_offset, size;
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
uint8_t frev, crev;
int i, num_indices;
 
1370,21 → 1372,18
 
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
((u8 *)&ss_info->asSS_Info[0]);
 
for (i = 0; i < num_indices; i++) {
if (ss_assign->ucSS_Id == id) {
if (ss_info->asSS_Info[i].ucSS_Id == id) {
ss->percentage =
le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
ss->type = ss_assign->ucSpreadSpectrumType;
ss->step = ss_assign->ucSS_Step;
ss->delay = ss_assign->ucSS_Delay;
ss->range = ss_assign->ucSS_Range;
ss->refdiv = ss_assign->ucRecommendedRef_Div;
le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
ss->step = ss_info->asSS_Info[i].ucSS_Step;
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
return true;
}
ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
}
}
return false;
1439,22 → 1438,6
break;
}
break;
case 8:
switch (id) {
case ASIC_INTERNAL_SS_ON_TMDS:
percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_HDMI:
percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_LVDS:
percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
break;
}
break;
default:
DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
break;
1472,12 → 1455,6
struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};
 
union asic_ss_assignment {
struct _ATOM_ASIC_SS_ASSIGNMENT v1;
struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
};
 
bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock)
1486,19 → 1463,9
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
uint16_t data_offset, size;
union asic_ss_info *ss_info;
union asic_ss_assignment *ss_assign;
uint8_t frev, crev;
int i, num_indices;
 
if (id == ASIC_INTERNAL_MEMORY_SS) {
if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
return false;
}
if (id == ASIC_INTERNAL_ENGINE_SS) {
if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
return false;
}
 
memset(ss, 0, sizeof(struct radeon_atom_ss));
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
1511,68 → 1478,45
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT);
 
ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
if ((ss_assign->v1.ucClockIndication == id) &&
(clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
(clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
ss->type = ss_assign->v1.ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
ss->percentage_divider = 100;
le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
return true;
}
ss_assign = (union asic_ss_assignment *)
((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
}
break;
case 2:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
if ((ss_assign->v2.ucClockIndication == id) &&
(clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
(clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
ss->type = ss_assign->v2.ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
ss->percentage_divider = 100;
if ((crev == 2) &&
((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS)))
ss->rate /= 100;
le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
return true;
}
ss_assign = (union asic_ss_assignment *)
((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
}
break;
case 3:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
if ((ss_assign->v3.ucClockIndication == id) &&
(clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
(clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
ss->type = ss_assign->v3.ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
if (ss_assign->v3.ucSpreadSpectrumMode &
SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
ss->percentage_divider = 1000;
else
ss->percentage_divider = 100;
if ((id == ASIC_INTERNAL_ENGINE_SS) ||
(id == ASIC_INTERNAL_MEMORY_SS))
ss->rate /= 100;
le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
if (rdev->flags & RADEON_IS_IGP)
radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
return true;
}
ss_assign = (union asic_ss_assignment *)
((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
}
break;
default:
1707,9 → 1651,7
kfree(edid);
}
}
record += fake_edid_record->ucFakeEDIDLength ?
fake_edid_record->ucFakeEDIDLength + 2 :
sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
1807,8 → 1749,7
if (misc & ATOM_DOUBLE_CLOCK_MODE)
mode->flags |= DRM_MODE_FLAG_DBLSCAN;
 
mode->crtc_clock = mode->clock =
le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
 
if (index == 1) {
/* PAL timings appear to have wrong values for totals */
1851,8 → 1792,7
if (misc & ATOM_DOUBLE_CLOCK_MODE)
mode->flags |= DRM_MODE_FLAG_DBLSCAN;
 
mode->crtc_clock = mode->clock =
le16_to_cpu(dtd_timings->usPixClk) * 10;
mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
break;
}
return true;
1963,7 → 1903,7
"adm1032",
"adm1030",
"max6649",
"lm63", /* lm64 */
"lm64",
"f75375",
"asc7xxx",
};
1974,7 → 1914,7
"adm1032",
"adm1030",
"max6649",
"lm63", /* lm64 */
"lm64",
"f75375",
"RV6xx",
"RV770",
1987,7 → 1927,6
"Northern Islands",
"Southern Islands",
"lm96163",
"Sea Islands",
};
 
union power_info {
2005,7 → 1944,6
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
struct _ATOM_PPLIB_SI_CLOCK_INFO si;
struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};
 
union pplib_power_state {
2271,16 → 2209,6
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
2313,8 → 2241,8
}
}
 
void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
u16 *vddc, u16 *vddci, u16 *mvdd)
static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
u16 *vddc, u16 *vddci)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
2324,7 → 2252,6
 
*vddc = 0;
*vddci = 0;
*mvdd = 0;
 
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
2332,12 → 2259,10
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
if ((frev == 2) && (crev >= 2)) {
if ((frev == 2) && (crev >= 2))
*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
*mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
}
}
}
 
static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
int state_index, int mode_index,
2346,9 → 2271,9
int j;
u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
u16 vddc, vddci, mvdd;
u16 vddc, vddci;
 
radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
 
rdev->pm.power_state[state_index].misc = misc;
rdev->pm.power_state[state_index].misc2 = misc2;
2391,13 → 2316,7
rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
} else {
u16 max_vddci = 0;
 
if (ASIC_IS_DCE4(rdev))
radeon_atom_get_max_voltage(rdev,
SET_VOLTAGE_TYPE_ASIC_VDDCI,
&max_vddci);
/* patch the table values with the default sclk/mclk from firmware info */
/* patch the table values with the default sclk/mclk */
for (j = 0; j < mode_index; j++) {
rdev->pm.power_state[state_index].clock_info[j].mclk =
rdev->clock.default_mclk;
2406,9 → 2325,6
if (vddc)
rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
vddc;
if (max_vddci)
rdev->pm.power_state[state_index].clock_info[j].voltage.vddci =
max_vddci;
}
}
}
2431,15 → 2347,6
sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
}
} else if (rdev->family >= CHIP_BONAIRE) {
sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
sclk |= clock_info->ci.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
mclk |= clock_info->ci.ucMemoryClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_NONE;
} else if (rdev->family >= CHIP_TAHITI) {
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
sclk |= clock_info->si.ucEngineClockHigh << 16;
2485,10 → 2392,6
case ATOM_VIRTUAL_VOLTAGE_ID1:
case ATOM_VIRTUAL_VOLTAGE_ID2:
case ATOM_VIRTUAL_VOLTAGE_ID3:
case ATOM_VIRTUAL_VOLTAGE_ID4:
case ATOM_VIRTUAL_VOLTAGE_ID5:
case ATOM_VIRTUAL_VOLTAGE_ID6:
case ATOM_VIRTUAL_VOLTAGE_ID7:
if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
&vddc) == 0)
2764,8 → 2667,6
struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
};
 
int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
2798,8 → 2699,7
break;
case 2:
case 3:
case 5:
/* r6xx, r7xx, evergreen, ni, si */
/* r6xx, r7xx, evergreen, ni */
if (rdev->family <= CHIP_RV770) {
args.v2.ucAction = clock_type;
args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */
2826,15 → 2726,12
ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
dividers->enable_dithen = (args.v3.ucCntlFlag &
ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
dividers->ref_div = args.v3.ucRefDiv;
dividers->vco_mode = (args.v3.ucCntlFlag &
ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
} else {
/* for SI we use ComputeMemoryClockParam for memory plls */
if (rdev->family >= CHIP_TAHITI)
return -EINVAL;
args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
if (strobe_mode)
args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
2860,25 → 2757,9
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
dividers->post_div = args.v4.ucPostDiv;
dividers->real_clock = le32_to_cpu(args.v4.ulClock);
break;
case 6:
/* CI */
/* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
args.v6_in.ulClock.ulComputeClockFlag = clock_type;
args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
dividers->ref_div = args.v6_out.ucPllRefDiv;
dividers->post_div = args.v6_out.ucPllPostDiv;
dividers->flags = args.v6_out.ucPllCntlFlag;
dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
break;
default:
return -EINVAL;
}
2885,57 → 2766,6
return 0;
}
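[radeon_atom_get_clock_dividers hands back a reference divider, a whole and fractional feedback divider, and a post divider. Under the standard PLL relation f_out = f_ref * fb / (ref * post), those outputs determine the programmed clock. A worked numeric example; the reference frequency and the fractional scale of 10 are assumptions for illustration:

    #include <stdio.h>

    int main(void)
    {
        double f_ref  = 27000.0;   /* reference clock in kHz (assumed) */
        int fb_div    = 100;       /* whole feedback divider */
        int frac_fb   = 5;         /* fractional part, scale of 10 assumed */
        int ref_div   = 2;
        int post_div  = 5;

        double f_vco = f_ref * (fb_div + frac_fb / 10.0) / ref_div;
        double f_out = f_vco / post_div;

        /* 27000 * 100.5 / 2 = 1356750 kHz VCO; / 5 = 271350 kHz out */
        printf("vco = %.1f kHz, out = %.1f kHz\n", f_vco, f_out);
        return 0;
    }
]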
 
int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
u32 clock,
bool strobe_mode,
struct atom_mpll_param *mpll_param)
{
COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
u8 frev, crev;
 
memset(&args, 0, sizeof(args));
memset(mpll_param, 0, sizeof(struct atom_mpll_param));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return -EINVAL;
 
switch (frev) {
case 2:
switch (crev) {
case 1:
/* SI */
args.ulClock = cpu_to_le32(clock); /* 10 khz */
args.ucInputFlag = 0;
if (strobe_mode)
args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
mpll_param->post_div = args.ucPostDiv;
mpll_param->dll_speed = args.ucDllSpeed;
mpll_param->bwcntl = args.ucBWCntl;
mpll_param->vco_mode =
(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
mpll_param->yclk_sel =
(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
mpll_param->qdr =
(args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
mpll_param->half_rate =
(args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;
}
return 0;
}
 
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
{
DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
2989,48 → 2819,6
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
u32 eng_clock, u32 mem_clock)
{
SET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
u32 tmp;
 
memset(&args, 0, sizeof(args));
 
tmp = eng_clock & SET_CLOCK_FREQ_MASK;
tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);
 
args.ulTargetEngineClock = cpu_to_le32(tmp);
if (mem_clock)
args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
u32 mem_clock)
{
u32 args;
int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
 
args = cpu_to_le32(mem_clock); /* 10 khz */
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
u32 mem_clock)
{
SET_MEMORY_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
u32 tmp = mem_clock | (COMPUTE_MEMORY_PLL_PARAM << 24);
 
args.ulTargetMemoryClock = cpu_to_le32(tmp); /* 10 khz */
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
union set_voltage {
struct _SET_VOLTAGE_PS_ALLOCATION alloc;
struct _SET_VOLTAGE_PARAMETERS v1;
3075,7 → 2863,7
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
u16 voltage_id, u16 *voltage)
{
union set_voltage args;
3114,898 → 2902,6
return 0;
}
 
int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
u16 *voltage,
u16 leakage_idx)
{
return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
 
int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
u16 *leakage_id)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
u8 frev, crev;
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return -EINVAL;
 
switch (crev) {
case 3:
case 4:
args.v3.ucVoltageType = 0;
args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
args.v3.usVoltageLevel = 0;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
*leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
 
return 0;
}
 
int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
u16 *vddc, u16 *vddci,
u16 virtual_voltage_id,
u16 vbios_voltage_id)
{
int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
u8 frev, crev;
u16 data_offset, size;
int i, j;
ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
 
*vddc = 0;
*vddci = 0;
 
if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset))
return -EINVAL;
 
profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
(rdev->mode_info.atom_context->bios + data_offset);
 
switch (frev) {
case 1:
return -EINVAL;
case 2:
switch (crev) {
case 1:
if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
return -EINVAL;
leakage_bin = (u16 *)
(rdev->mode_info.atom_context->bios + data_offset +
le16_to_cpu(profile->usLeakageBinArrayOffset));
vddc_id_buf = (u16 *)
(rdev->mode_info.atom_context->bios + data_offset +
le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
vddc_buf = (u16 *)
(rdev->mode_info.atom_context->bios + data_offset +
le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
vddci_id_buf = (u16 *)
(rdev->mode_info.atom_context->bios + data_offset +
le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
vddci_buf = (u16 *)
(rdev->mode_info.atom_context->bios + data_offset +
le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
 
if (profile->ucElbVDDC_Num > 0) {
for (i = 0; i < profile->ucElbVDDC_Num; i++) {
if (vddc_id_buf[i] == virtual_voltage_id) {
for (j = 0; j < profile->ucLeakageBinNum; j++) {
if (vbios_voltage_id <= leakage_bin[j]) {
*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
break;
}
}
break;
}
}
}
if (profile->ucElbVDDCI_Num > 0) {
for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
if (vddci_id_buf[i] == virtual_voltage_id) {
for (j = 0; j < profile->ucLeakageBinNum; j++) {
if (vbios_voltage_id <= leakage_bin[j]) {
*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
break;
}
}
break;
}
}
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
 
return 0;
}
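[The leakage lookup above is a two-step match: scan the id arrays for the virtual voltage id to pick a column, then find the first leakage bin that covers the VBIOS voltage id to pick a row, and read the flattened 2D level table at [row * column_count + column]. A tiny numeric example of that flattened indexing, with invented values:

    #include <stdio.h>

    int main(void)
    {
        int num_cols = 3;                       /* plays the role of ucElbVDDC_Num */
        unsigned short leakage_bin[] = { 10, 20, 30 };  /* ascending bins */
        unsigned short level[] = {              /* 3 bins x 3 columns, flat */
             900,  910,  920,
             950,  960,  970,
            1000, 1010, 1020,
        };
        int col = 1;                            /* column matched by the id scan */
        unsigned short vbios_id = 15, vddc = 0;

        for (int j = 0; j < 3; j++) {
            if (vbios_id <= leakage_bin[j]) {   /* first bin covering the id */
                vddc = level[j * num_cols + col];
                break;
            }
        }
        printf("vddc = %u\n", (unsigned)vddc);  /* prints 960 */
        return 0;
    }
]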
 
union get_voltage_info {
struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
};
 
int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
u16 virtual_voltage_id,
u16 *voltage)
{
int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
u32 entry_id;
u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
union get_voltage_info args;
 
for (entry_id = 0; entry_id < count; entry_id++) {
if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
virtual_voltage_id)
break;
}
 
if (entry_id >= count)
return -EINVAL;
 
args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
args.in.ulSCLKFreq =
cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
 
return 0;
}
 
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
u8 frev, crev;
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return -EINVAL;
 
switch (crev) {
case 1:
return -EINVAL;
case 2:
args.v2.ucVoltageType = voltage_type;
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
*gpio_mask = le32_to_cpu(*(u32 *)&args.v2);
 
args.v2.ucVoltageType = voltage_type;
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
*gpio_value = le32_to_cpu(*(u32 *)&args.v2);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
 
return 0;
}
 
union voltage_object_info {
struct _ATOM_VOLTAGE_OBJECT_INFO v1;
struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
};
 
union voltage_object {
struct _ATOM_VOLTAGE_OBJECT v1;
struct _ATOM_VOLTAGE_OBJECT_V2 v2;
union _ATOM_VOLTAGE_OBJECT_V3 v3;
};
 
static ATOM_VOLTAGE_OBJECT *atom_lookup_voltage_object_v1(ATOM_VOLTAGE_OBJECT_INFO *v1,
u8 voltage_type)
{
u32 size = le16_to_cpu(v1->sHeader.usStructureSize);
u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO, asVoltageObj[0]);
u8 *start = (u8 *)v1;
 
while (offset < size) {
ATOM_VOLTAGE_OBJECT *vo = (ATOM_VOLTAGE_OBJECT *)(start + offset);
if (vo->ucVoltageType == voltage_type)
return vo;
offset += offsetof(ATOM_VOLTAGE_OBJECT, asFormula.ucVIDAdjustEntries) +
vo->asFormula.ucNumOfVoltageEntries;
}
return NULL;
}
 
static ATOM_VOLTAGE_OBJECT_V2 *atom_lookup_voltage_object_v2(ATOM_VOLTAGE_OBJECT_INFO_V2 *v2,
u8 voltage_type)
{
u32 size = le16_to_cpu(v2->sHeader.usStructureSize);
u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V2, asVoltageObj[0]);
u8 *start = (u8*)v2;
 
while (offset < size) {
ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset);
if (vo->ucVoltageType == voltage_type)
return vo;
offset += offsetof(ATOM_VOLTAGE_OBJECT_V2, asFormula.asVIDAdjustEntries) +
(vo->asFormula.ucNumOfVoltageEntries * sizeof(VOLTAGE_LUT_ENTRY));
}
return NULL;
}
 
static ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
u8 voltage_type, u8 voltage_mode)
{
u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
u8 *start = (u8*)v3;
 
while (offset < size) {
ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
(vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
return vo;
offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
}
return NULL;
}
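
All three lookup helpers above share one walk: entries are variable length, so the scan advances by each entry's own size (or a computed formula size) until the accumulated offset reaches usStructureSize. A self-contained sketch of that pattern, with made-up structure names rather than the real ATOM layouts:

#include <stdint.h>
#include <stddef.h>

struct entry_hdr {               /* illustrative, not a real ATOM struct */
    uint8_t  type;
    uint8_t  mode;
    uint16_t size;               /* total size of this entry in bytes */
};

static const struct entry_hdr *lookup(const uint8_t *table, uint16_t table_size,
                                      uint16_t header_size, uint8_t type)
{
    uint32_t offset = header_size;

    while (offset + sizeof(struct entry_hdr) <= table_size) {
        const struct entry_hdr *e = (const struct entry_hdr *)(table + offset);

        if (e->type == type)
            return e;
        if (e->size == 0)        /* defensive extra: avoid an endless loop */
            break;
        offset += e->size;
    }
    return NULL;
}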
 
bool
radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
u8 voltage_type, u8 voltage_mode)
{
int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
u8 frev, crev;
u16 data_offset, size;
union voltage_object_info *voltage_info;
union voltage_object *voltage_object = NULL;
 
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
voltage_info = (union voltage_object_info *)
(rdev->mode_info.atom_context->bios + data_offset);
 
switch (frev) {
case 1:
case 2:
switch (crev) {
case 1:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
if (voltage_object &&
(voltage_object->v1.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
return true;
break;
case 2:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
if (voltage_object &&
(voltage_object->v2.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
return true;
break;
default:
DRM_ERROR("unknown voltage object table\n");
return false;
}
break;
case 3:
switch (crev) {
case 1:
if (atom_lookup_voltage_object_v3(&voltage_info->v3,
voltage_type, voltage_mode))
return true;
break;
default:
DRM_ERROR("unknown voltage object table\n");
return false;
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return false;
}
 
}
return false;
}
 
int radeon_atom_get_svi2_info(struct radeon_device *rdev,
u8 voltage_type,
u8 *svd_gpio_id, u8 *svc_gpio_id)
{
int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
u8 frev, crev;
u16 data_offset, size;
union voltage_object_info *voltage_info;
union voltage_object *voltage_object = NULL;
 
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
voltage_info = (union voltage_object_info *)
(rdev->mode_info.atom_context->bios + data_offset);
 
switch (frev) {
case 3:
switch (crev) {
case 1:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v3(&voltage_info->v3,
voltage_type,
VOLTAGE_OBJ_SVID2);
if (voltage_object) {
*svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
*svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
} else {
return -EINVAL;
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}
 
}
return 0;
}
 
int radeon_atom_get_max_voltage(struct radeon_device *rdev,
u8 voltage_type, u16 *max_voltage)
{
int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
u8 frev, crev;
u16 data_offset, size;
union voltage_object_info *voltage_info;
union voltage_object *voltage_object = NULL;
 
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
voltage_info = (union voltage_object_info *)
(rdev->mode_info.atom_context->bios + data_offset);
 
switch (crev) {
case 1:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
if (voltage_object) {
ATOM_VOLTAGE_FORMULA *formula =
&voltage_object->v1.asFormula;
if (formula->ucFlag & 1)
*max_voltage =
le16_to_cpu(formula->usVoltageBaseLevel) +
formula->ucNumOfVoltageEntries / 2 *
le16_to_cpu(formula->usVoltageStep);
else
*max_voltage =
le16_to_cpu(formula->usVoltageBaseLevel) +
(formula->ucNumOfVoltageEntries - 1) *
le16_to_cpu(formula->usVoltageStep);
return 0;
}
break;
case 2:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
if (voltage_object) {
ATOM_VOLTAGE_FORMULA_V2 *formula =
&voltage_object->v2.asFormula;
if (formula->ucNumOfVoltageEntries) {
VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *)
((u8 *)&formula->asVIDAdjustEntries[0] +
(sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1)));
*max_voltage =
le16_to_cpu(lut->usVoltageValue);
return 0;
}
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}
 
}
return -EINVAL;
}
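
With made-up numbers, the v1 formula above reads: base 800 mV, step 25 mV, 5 entries gives 800 + (5 / 2) * 25 = 850 mV when ucFlag bit 0 is set, and 800 + (5 - 1) * 25 = 900 mV otherwise. A standalone sketch of just that arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint16_t max_voltage(uint16_t base, uint16_t step,
                            uint8_t entries, uint8_t flag)
{
    if (flag & 1)
        return base + entries / 2 * step;   /* paired-entry layout */
    return base + (entries - 1) * step;
}

int main(void)
{
    printf("%u %u\n", max_voltage(800, 25, 5, 1),   /* 850 */
           max_voltage(800, 25, 5, 0));             /* 900 */
    return 0;
}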
 
int radeon_atom_get_min_voltage(struct radeon_device *rdev,
u8 voltage_type, u16 *min_voltage)
{
int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
u8 frev, crev;
u16 data_offset, size;
union voltage_object_info *voltage_info;
union voltage_object *voltage_object = NULL;
 
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
voltage_info = (union voltage_object_info *)
(rdev->mode_info.atom_context->bios + data_offset);
 
switch (crev) {
case 1:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
if (voltage_object) {
ATOM_VOLTAGE_FORMULA *formula =
&voltage_object->v1.asFormula;
*min_voltage =
le16_to_cpu(formula->usVoltageBaseLevel);
return 0;
}
break;
case 2:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
if (voltage_object) {
ATOM_VOLTAGE_FORMULA_V2 *formula =
&voltage_object->v2.asFormula;
if (formula->ucNumOfVoltageEntries) {
*min_voltage =
le16_to_cpu(formula->asVIDAdjustEntries[
0
].usVoltageValue);
return 0;
}
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}
 
}
return -EINVAL;
}
 
int radeon_atom_get_voltage_step(struct radeon_device *rdev,
u8 voltage_type, u16 *voltage_step)
{
int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
u8 frev, crev;
u16 data_offset, size;
union voltage_object_info *voltage_info;
union voltage_object *voltage_object = NULL;
 
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
voltage_info = (union voltage_object_info *)
(rdev->mode_info.atom_context->bios + data_offset);
 
switch (crev) {
case 1:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
if (voltage_object) {
ATOM_VOLTAGE_FORMULA *formula =
&voltage_object->v1.asFormula;
if (formula->ucFlag & 1)
*voltage_step =
(le16_to_cpu(formula->usVoltageStep) + 1) / 2;
else
*voltage_step =
le16_to_cpu(formula->usVoltageStep);
return 0;
}
break;
case 2:
return -EINVAL;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}
 
}
return -EINVAL;
}
 
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,
u16 *true_voltage)
{
u16 min_voltage, max_voltage, voltage_step;
 
if (radeon_atom_get_max_voltage(rdev, voltage_type, &max_voltage))
return -EINVAL;
if (radeon_atom_get_min_voltage(rdev, voltage_type, &min_voltage))
return -EINVAL;
if (radeon_atom_get_voltage_step(rdev, voltage_type, &voltage_step))
return -EINVAL;
 
if (nominal_voltage <= min_voltage)
*true_voltage = min_voltage;
else if (nominal_voltage >= max_voltage)
*true_voltage = max_voltage;
else
*true_voltage = min_voltage +
((nominal_voltage - min_voltage) / voltage_step) *
voltage_step;
 
return 0;
}
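
For example (made-up values): with min 800 mV, max 1100 mV and a 25 mV step, a nominal 837 mV rounds down to 825 mV, while anything outside [800, 1100] is clamped to the nearest bound. A standalone sketch of that clamp-and-quantize:

#include <stdint.h>
#include <stdio.h>

static uint16_t round_to_true(uint16_t nominal, uint16_t min,
                              uint16_t max, uint16_t step)
{
    if (nominal <= min)
        return min;
    if (nominal >= max)
        return max;
    return min + (nominal - min) / step * step;  /* round down to a step */
}

int main(void)
{
    printf("%u\n", round_to_true(837, 800, 1100, 25)); /* prints 825 */
    return 0;
}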
 
int radeon_atom_get_voltage_table(struct radeon_device *rdev,
u8 voltage_type, u8 voltage_mode,
struct atom_voltage_table *voltage_table)
{
int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
u8 frev, crev;
u16 data_offset, size;
int i, ret;
union voltage_object_info *voltage_info;
union voltage_object *voltage_object = NULL;
 
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
voltage_info = (union voltage_object_info *)
(rdev->mode_info.atom_context->bios + data_offset);
 
switch (frev) {
case 1:
case 2:
switch (crev) {
case 1:
DRM_ERROR("old table version %d, %d\n", frev, crev);
return -EINVAL;
case 2:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
if (voltage_object) {
ATOM_VOLTAGE_FORMULA_V2 *formula =
&voltage_object->v2.asFormula;
VOLTAGE_LUT_ENTRY *lut;
if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
return -EINVAL;
lut = &formula->asVIDAdjustEntries[0];
for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
voltage_table->entries[i].value =
le16_to_cpu(lut->usVoltageValue);
ret = radeon_atom_get_voltage_gpio_settings(rdev,
voltage_table->entries[i].value,
voltage_type,
&voltage_table->entries[i].smio_low,
&voltage_table->mask_low);
if (ret)
return ret;
lut = (VOLTAGE_LUT_ENTRY *)
((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY));
}
voltage_table->count = formula->ucNumOfVoltageEntries;
return 0;
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}
break;
case 3:
switch (crev) {
case 1:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v3(&voltage_info->v3,
voltage_type, voltage_mode);
if (voltage_object) {
ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
&voltage_object->v3.asGpioVoltageObj;
VOLTAGE_LUT_ENTRY_V2 *lut;
if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
return -EINVAL;
lut = &gpio->asVolGpioLut[0];
for (i = 0; i < gpio->ucGpioEntryNum; i++) {
voltage_table->entries[i].value =
le16_to_cpu(lut->usVoltageValue);
voltage_table->entries[i].smio_low =
le32_to_cpu(lut->ulVoltageId);
lut = (VOLTAGE_LUT_ENTRY_V2 *)
((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
}
voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
voltage_table->count = gpio->ucGpioEntryNum;
voltage_table->phase_delay = gpio->ucPhaseDelay;
return 0;
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}
}
return -EINVAL;
}
 
union vram_info {
struct _ATOM_VRAM_INFO_V3 v1_3;
struct _ATOM_VRAM_INFO_V4 v1_4;
struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
};
 
int radeon_atom_get_memory_info(struct radeon_device *rdev,
u8 module_index, struct atom_memory_info *mem_info)
{
int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
u8 frev, crev, i;
u16 data_offset, size;
union vram_info *vram_info;
 
memset(mem_info, 0, sizeof(struct atom_memory_info));
 
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
vram_info = (union vram_info *)
(rdev->mode_info.atom_context->bios + data_offset);
switch (frev) {
case 1:
switch (crev) {
case 3:
/* r6xx */
if (module_index < vram_info->v1_3.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V3 *vram_module =
(ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo;
 
for (i = 0; i < module_index; i++) {
if (le16_to_cpu(vram_module->usSize) == 0)
return -EINVAL;
vram_module = (ATOM_VRAM_MODULE_V3 *)
((u8 *)vram_module + le16_to_cpu(vram_module->usSize));
}
mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf;
mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0;
} else
return -EINVAL;
break;
case 4:
/* r7xx, evergreen */
if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V4 *vram_module =
(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
 
for (i = 0; i < module_index; i++) {
if (le16_to_cpu(vram_module->usModuleSize) == 0)
return -EINVAL;
vram_module = (ATOM_VRAM_MODULE_V4 *)
((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
}
mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
} else
return -EINVAL;
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
break;
case 2:
switch (crev) {
case 1:
/* ni */
if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V7 *vram_module =
(ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo;
 
for (i = 0; i < module_index; i++) {
if (le16_to_cpu(vram_module->usModuleSize) == 0)
return -EINVAL;
vram_module = (ATOM_VRAM_MODULE_V7 *)
((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
}
mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
} else
return -EINVAL;
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
return 0;
}
return -EINVAL;
}
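
The module walks above all have the same shape: hop module_index times through variable-sized records, bailing out if a record reports size zero, which would otherwise loop forever. A minimal sketch with an illustrative record type (not the real ATOM_VRAM_MODULE layouts):

#include <stdint.h>
#include <stddef.h>

struct vram_module {             /* illustrative stand-in */
    uint16_t size;               /* total record size in bytes */
    /* ... per-module fields follow ... */
};

static const struct vram_module *nth_module(const uint8_t *first, int n)
{
    const struct vram_module *m = (const struct vram_module *)first;
    int i;

    for (i = 0; i < n; i++) {
        if (m->size == 0)        /* malformed record: give up */
            return NULL;
        m = (const struct vram_module *)((const uint8_t *)m + m->size);
    }
    return m;
}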
 
int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
bool gddr5, u8 module_index,
struct atom_memory_clock_range_table *mclk_range_table)
{
int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
u8 frev, crev, i;
u16 data_offset, size;
union vram_info *vram_info;
u32 mem_timing_size = gddr5 ?
sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
 
memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
 
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
vram_info = (union vram_info *)
(rdev->mode_info.atom_context->bios + data_offset);
switch (frev) {
case 1:
switch (crev) {
case 3:
DRM_ERROR("old table version %d, %d\n", frev, crev);
return -EINVAL;
case 4:
/* r7xx, evergreen */
if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V4 *vram_module =
(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
ATOM_MEMORY_TIMING_FORMAT *format;
 
for (i = 0; i < module_index; i++) {
if (le16_to_cpu(vram_module->usModuleSize) == 0)
return -EINVAL;
vram_module = (ATOM_VRAM_MODULE_V4 *)
((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
}
mclk_range_table->num_entries = (u8)
((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
mem_timing_size);
format = &vram_module->asMemTiming[0];
for (i = 0; i < mclk_range_table->num_entries; i++) {
mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
format = (ATOM_MEMORY_TIMING_FORMAT *)
((u8 *)format + mem_timing_size);
}
} else
return -EINVAL;
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
break;
case 2:
DRM_ERROR("new table version %d, %d\n", frev, crev);
return -EINVAL;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
return 0;
}
return -EINVAL;
}
 
#define MEM_ID_MASK 0xff000000
#define MEM_ID_SHIFT 24
#define CLOCK_RANGE_MASK 0x00ffffff
#define CLOCK_RANGE_SHIFT 0
#define LOW_NIBBLE_MASK 0xf
#define DATA_EQU_PREV 0
#define DATA_FROM_TABLE 4
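
Each data block in the patch table starts with one packed u32: the module id in the top byte and the clock-range ceiling in the low 24 bits, as the masks above describe. Decoding it looks like this (the word value here is made up):

#include <stdint.h>
#include <stdio.h>

#define MEM_ID_MASK       0xff000000
#define MEM_ID_SHIFT      24
#define CLOCK_RANGE_MASK  0x00ffffff
#define CLOCK_RANGE_SHIFT 0

int main(void)
{
    uint32_t word = 0x0100c350;  /* module 1, clock range 50000 */
    unsigned mem_id   = (word & MEM_ID_MASK) >> MEM_ID_SHIFT;
    unsigned mclk_max = (word & CLOCK_RANGE_MASK) >> CLOCK_RANGE_SHIFT;

    printf("module %u, mclk_max %u\n", mem_id, mclk_max);
    return 0;
}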
 
int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
u8 module_index,
struct atom_mc_reg_table *reg_table)
{
int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
u32 i = 0, j;
u16 data_offset, size;
union vram_info *vram_info;
 
memset(reg_table, 0, sizeof(struct atom_mc_reg_table));
 
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
vram_info = (union vram_info *)
(rdev->mode_info.atom_context->bios + data_offset);
switch (frev) {
case 1:
DRM_ERROR("old table version %d, %d\n", frev, crev);
return -EINVAL;
case 2:
switch (crev) {
case 1:
if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
ATOM_INIT_REG_BLOCK *reg_block =
(ATOM_INIT_REG_BLOCK *)
((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
(ATOM_MEMORY_SETTING_DATA_BLOCK *)
((u8 *)reg_block + (2 * sizeof(u16)) +
le16_to_cpu(reg_block->usRegIndexTblSize));
ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
while (i < num_entries) {
if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
break;
reg_table->mc_reg_address[i].s1 =
(u16)(le16_to_cpu(format->usRegIndex));
reg_table->mc_reg_address[i].pre_reg_data =
(u8)(format->ucPreRegDataLength);
i++;
format = (ATOM_INIT_REG_INDEX_FORMAT *)
((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
}
reg_table->last = i;
while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
(num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
>> MEM_ID_SHIFT);
if (module_index == t_mem_id) {
reg_table->mc_reg_table_entry[num_ranges].mclk_max =
(u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
>> CLOCK_RANGE_SHIFT);
for (i = 0, j = 1; i < reg_table->last; i++) {
if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
(u32)le32_to_cpu(*((u32 *)reg_data + j));
j++;
} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
}
}
num_ranges++;
}
reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
}
if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
return -EINVAL;
reg_table->num_entries = num_ranges;
} else
return -EINVAL;
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
}
return 0;
}
return -EINVAL;
}
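
The fill loop above distributes one range's data words across the register list: entries tagged DATA_FROM_TABLE consume the next u32 from the block, entries tagged DATA_EQU_PREV copy the previous register's value. A standalone sketch with made-up data (the real code starts j at 1 because word 0 of each block is the packed header):

#include <stdint.h>
#include <stdio.h>

#define LOW_NIBBLE_MASK 0xf
#define DATA_EQU_PREV   0
#define DATA_FROM_TABLE 4

int main(void)
{
    uint8_t  pre_reg_data[4] = { DATA_FROM_TABLE, DATA_EQU_PREV,
                                 DATA_FROM_TABLE, DATA_EQU_PREV };
    uint32_t block[] = { 0x11111111, 0x22222222 }; /* words after the header */
    uint32_t mc_data[4];
    int i, j = 0;

    for (i = 0; i < 4; i++) {
        if ((pre_reg_data[i] & LOW_NIBBLE_MASK) == DATA_FROM_TABLE)
            mc_data[i] = block[j++];
        else
            mc_data[i] = mc_data[i - 1]; /* entry 0 is FROM_TABLE here */
    }
    for (i = 0; i < 4; i++)
        printf("mc_data[%d] = 0x%08x\n", i, (unsigned)mc_data[i]);
    return 0;
}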
 
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
4025,10 → 2921,6
/* tell the bios not to handle mode switching */
bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
 
/* clear the vbios dpms state */
if (ASIC_IS_DCE4(rdev))
bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
 
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
/drivers/video/drm/radeon/radeon_bios.c
160,7 → 160,7
return false;
 
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
dhandle = ACPI_HANDLE(&pdev->dev);
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
 
474,7 → 474,7
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
fp2_gen_cntl = 0;
 
if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
}
 
511,7 → 511,7
(RADEON_CRTC_SYNC_TRISTAT |
RADEON_CRTC_DISPLAY_DIS)));
 
if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
}
 
529,7 → 529,7
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
}
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
}
return r;
587,7 → 587,7
vhdr->DeviceID != rdev->pdev->device) {
DRM_INFO("ACPI VFCT table is not for this card\n");
goto out_unmap;
}
};
 
if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
DRM_ERROR("ACPI VFCT image truncated\n");
/drivers/video/drm/radeon/radeon_combios.c
37,6 → 37,22
#include <asm/pci-bridge.h>
#endif /* CONFIG_PPC_PMAC */
 
/* from radeon_encoder.c */
extern uint32_t
radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
 
/* from radeon_connector.c */
extern void
radeon_add_legacy_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
uint16_t connector_object_id,
struct radeon_hpd *hpd);
 
/* from radeon_legacy_encoder.c */
extern void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
131,7 → 147,7
enum radeon_combios_table_offset table)
{
struct radeon_device *rdev = dev->dev_private;
int rev, size;
int rev;
uint16_t offset = 0, check_offset;
 
if (!rdev->bios)
140,106 → 156,174
switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
check_offset = 0xc;
check_offset = RBIOS16(rdev->bios_header_start + 0xc);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_BIOS_SUPPORT_TABLE:
check_offset = 0x14;
check_offset = RBIOS16(rdev->bios_header_start + 0x14);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_DAC_PROGRAMMING_TABLE:
check_offset = 0x2a;
check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_MAX_COLOR_DEPTH_TABLE:
check_offset = 0x2c;
check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_CRTC_INFO_TABLE:
check_offset = 0x2e;
check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_PLL_INFO_TABLE:
check_offset = 0x30;
check_offset = RBIOS16(rdev->bios_header_start + 0x30);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_TV_INFO_TABLE:
check_offset = 0x32;
check_offset = RBIOS16(rdev->bios_header_start + 0x32);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_DFP_INFO_TABLE:
check_offset = 0x34;
check_offset = RBIOS16(rdev->bios_header_start + 0x34);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_HW_CONFIG_INFO_TABLE:
check_offset = 0x36;
check_offset = RBIOS16(rdev->bios_header_start + 0x36);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_MULTIMEDIA_INFO_TABLE:
check_offset = 0x38;
check_offset = RBIOS16(rdev->bios_header_start + 0x38);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_TV_STD_PATCH_TABLE:
check_offset = 0x3e;
check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_LCD_INFO_TABLE:
check_offset = 0x40;
check_offset = RBIOS16(rdev->bios_header_start + 0x40);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_MOBILE_INFO_TABLE:
check_offset = 0x42;
check_offset = RBIOS16(rdev->bios_header_start + 0x42);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_PLL_INIT_TABLE:
check_offset = 0x46;
check_offset = RBIOS16(rdev->bios_header_start + 0x46);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_MEM_CONFIG_TABLE:
check_offset = 0x48;
check_offset = RBIOS16(rdev->bios_header_start + 0x48);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_SAVE_MASK_TABLE:
check_offset = 0x4a;
check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_HARDCODED_EDID_TABLE:
check_offset = 0x4c;
check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_ASIC_INIT_2_TABLE:
check_offset = 0x4e;
check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_CONNECTOR_INFO_TABLE:
check_offset = 0x50;
check_offset = RBIOS16(rdev->bios_header_start + 0x50);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_DYN_CLK_1_TABLE:
check_offset = 0x52;
check_offset = RBIOS16(rdev->bios_header_start + 0x52);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_RESERVED_MEM_TABLE:
check_offset = 0x54;
check_offset = RBIOS16(rdev->bios_header_start + 0x54);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_EXT_TMDS_INFO_TABLE:
check_offset = 0x58;
check_offset = RBIOS16(rdev->bios_header_start + 0x58);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_MEM_CLK_INFO_TABLE:
check_offset = 0x5a;
check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_EXT_DAC_INFO_TABLE:
check_offset = 0x5c;
check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_MISC_INFO_TABLE:
check_offset = 0x5e;
check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_CRT_INFO_TABLE:
check_offset = 0x60;
check_offset = RBIOS16(rdev->bios_header_start + 0x60);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
check_offset = 0x62;
check_offset = RBIOS16(rdev->bios_header_start + 0x62);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
check_offset = 0x64;
check_offset = RBIOS16(rdev->bios_header_start + 0x64);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_FAN_SPEED_INFO_TABLE:
check_offset = 0x66;
check_offset = RBIOS16(rdev->bios_header_start + 0x66);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_OVERDRIVE_INFO_TABLE:
check_offset = 0x68;
check_offset = RBIOS16(rdev->bios_header_start + 0x68);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_OEM_INFO_TABLE:
check_offset = 0x6a;
check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_DYN_CLK_2_TABLE:
check_offset = 0x6c;
check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
check_offset = 0x6e;
check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
if (check_offset)
offset = check_offset;
break;
case COMBIOS_I2C_INFO_TABLE:
check_offset = 0x70;
check_offset = RBIOS16(rdev->bios_header_start + 0x70);
if (check_offset)
offset = check_offset;
break;
/* relative offset tables */
case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
355,16 → 439,11
}
break;
default:
check_offset = 0;
break;
}
 
size = RBIOS8(rdev->bios_header_start + 0x6);
/* check absolute offset tables */
if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
offset = RBIOS16(rdev->bios_header_start + check_offset);
return offset;
 
return offset;
}
 
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
886,22 → 965,16
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
/* if the values are zeros, use the table */
if ((dac == 0) || (bg == 0))
found = 0;
else
/* if the values are all zeros, use the table */
if (p_dac->ps2_pdac_adj)
found = 1;
}
 
/* quirks */
/* Radeon 7000 (RV100) */
if (((dev->pdev->device == 0x5159) &&
(dev->pdev->subsystem_vendor == 0x174B) &&
(dev->pdev->subsystem_device == 0x7c28)) ||
/* Radeon 9100 (R200) */
((dev->pdev->device == 0x514D) &&
if ((dev->pdev->device == 0x514D) &&
(dev->pdev->subsystem_vendor == 0x174B) &&
(dev->pdev->subsystem_device == 0x7149))) {
(dev->pdev->subsystem_device == 0x7149)) {
/* vbios value is bad, use the default */
found = 0;
}
/drivers/video/drm/radeon/radeon_connectors.c
31,8 → 31,18
#include "radeon.h"
#include "atom.h"
 
#define DISABLE_DP 0
 
 
extern void
radeon_combios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
extern void
radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
 
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
48,7 → 58,6
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
/* if the connector is already off, don't turn it back on */
/* FIXME: This access isn't protected by any locks. */
if (connector->dpms != DRM_MODE_DPMS_ON)
return;
 
90,7 → 99,7
 
if (crtc && crtc->enabled) {
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->primary->fb);
crtc->x, crtc->y, crtc->fb);
}
}
 
101,13 → 110,12
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int bpc = 8;
int mode_clock, max_tmds_clock;
 
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
115,7 → 123,7
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
124,7 → 132,7
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
147,73 → 155,6
}
break;
}
 
if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* hdmi deep color only implemented on DCE4+ */
if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
connector->name, bpc);
bpc = 8;
}
 
/*
* Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
* much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
* 12 bpc is always supported on hdmi deep color sinks, as this is
* required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
*/
if (bpc > 12) {
DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
connector->name, bpc);
bpc = 12;
}
 
/* Any defined maximum tmds clock limit we must not exceed? */
if (connector->max_tmds_clock > 0) {
/* mode_clock is clock in kHz for mode to be modeset on this connector */
mode_clock = radeon_connector->pixelclock_for_modeset;
 
/* Maximum allowable input clock in kHz */
max_tmds_clock = connector->max_tmds_clock * 1000;
 
DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
connector->name, mode_clock, max_tmds_clock);
 
/* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
(mode_clock * 5/4 <= max_tmds_clock))
bpc = 10;
else
bpc = 8;
 
DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
connector->name, bpc);
}
 
if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
bpc = 8;
DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
connector->name, bpc);
}
}
else if (bpc > 8) {
/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
connector->name);
bpc = 8;
}
}
 
if ((radeon_deep_color == 0) && (bpc > 8)) {
DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
connector->name);
bpc = 8;
}
 
DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
connector->name, connector->display_info.bpc, bpc);
 
return bpc;
}
 
225,6 → 166,7
struct drm_encoder *best_encoder = NULL;
struct drm_encoder *encoder = NULL;
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
struct drm_mode_object *obj;
bool connected;
int i;
 
234,11 → 176,14
if (connector->encoder_ids[i] == 0)
break;
 
encoder = drm_encoder_find(connector->dev,
connector->encoder_ids[i]);
if (!encoder)
obj = drm_mode_object_find(connector->dev,
connector->encoder_ids[i],
DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
 
encoder = obj_to_encoder(obj);
 
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
254,6 → 199,7
 
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
 
261,10 → 207,11
if (connector->encoder_ids[i] == 0)
break;
 
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
if (!encoder)
obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
 
encoder = obj_to_encoder(obj);
if (encoder->encoder_type == encoder_type)
return encoder;
}
271,123 → 218,22
return NULL;
}
 
struct edid *radeon_connector_edid(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
 
if (radeon_connector->edid) {
return radeon_connector->edid;
} else if (edid_blob) {
struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
if (edid)
radeon_connector->edid = edid;
}
return radeon_connector->edid;
}
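
radeon_connector_edid() above is a lazy cache: return the stored EDID if present, otherwise duplicate the connector's property blob and keep the copy. A userspace-flavoured sketch of the same pattern (struct blob and the file-scope cache are stand-ins for the connector fields):

#include <stdlib.h>
#include <string.h>

struct blob { size_t length; unsigned char data[128]; };

static unsigned char *cached_edid;   /* stand-in for radeon_connector->edid */

static unsigned char *get_edid(const struct blob *b)
{
    if (cached_edid)
        return cached_edid;          /* already duplicated earlier */
    if (b) {
        unsigned char *copy = malloc(b->length);
        if (copy) {
            memcpy(copy, b->data, b->length);  /* like kmemdup() */
            cached_edid = copy;      /* ownership stays with the cache */
        }
    }
    return cached_edid;
}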
 
static void radeon_connector_get_edid(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
if (radeon_connector->edid)
return;
 
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
 
if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) &&
radeon_connector->ddc_bus->has_aux) {
radeon_connector->edid = drm_get_edid(connector,
&radeon_connector->ddc_bus->aux.ddc);
} else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
radeon_connector->ddc_bus->has_aux)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->aux.ddc);
else if (radeon_connector->ddc_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
} else if (radeon_connector->ddc_bus) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
}
 
if (!radeon_connector->edid) {
if (rdev->is_atom_bios) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
} else {
/* some servers provide a hardcoded edid in rom for KVMs */
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
}
}
 
static void radeon_connector_free_edid(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
}
 
static int radeon_ddc_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
 
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
ret = drm_add_edid_modes(connector, radeon_connector->edid);
drm_edid_to_eld(connector, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(connector, NULL);
return 0;
}
 
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct drm_mode_object *obj;
struct drm_encoder *encoder;
 
/* pick the encoder ids */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
if (enc_id) {
obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
if (!obj)
return NULL;
encoder = obj_to_encoder(obj);
return encoder;
}
 
static void radeon_get_native_mode(struct drm_connector *connector)
{
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
struct radeon_encoder *radeon_encoder;
 
if (encoder == NULL)
return;
 
radeon_encoder = to_radeon_encoder(encoder);
 
if (!list_empty(&connector->probed_modes)) {
struct drm_display_mode *preferred_mode =
list_first_entry(&connector->probed_modes,
struct drm_display_mode, head);
 
radeon_encoder->native_mode = *preferred_mode;
} else {
radeon_encoder->native_mode.clock = 0;
return NULL;
}
}
 
/*
* radeon_connector_analog_encoder_conflict_solve
424,17 → 270,13
continue;
 
if (priority == true) {
DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
conflict->name);
DRM_DEBUG_KMS("in favor of %s\n",
connector->name);
DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
conflict->status = connector_status_disconnected;
radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
} else {
DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n",
connector->name);
DRM_DEBUG_KMS("in favor of %s\n",
conflict->name);
DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
current_status = connector_status_disconnected;
}
break;
557,36 → 399,6
}
}
 
if (property == rdev->mode_info.audio_property) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
 
radeon_encoder = to_radeon_encoder(encoder);
 
if (radeon_connector->audio != val) {
radeon_connector->audio = val;
radeon_property_change_mode(&radeon_encoder->base);
}
}
 
if (property == rdev->mode_info.dither_property) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
if (!encoder)
return 0;
 
radeon_encoder = to_radeon_encoder(encoder);
 
if (radeon_connector->dither != val) {
radeon_connector->dither = val;
radeon_property_change_mode(&radeon_encoder->base);
}
}
 
if (property == rdev->mode_info.underscan_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
689,38 → 501,9
radeon_property_change_mode(&radeon_encoder->base);
}
 
if (property == dev->mode_config.scaling_mode_property) {
enum radeon_rmx_type rmx_type;
 
if (connector->encoder)
radeon_encoder = to_radeon_encoder(connector->encoder);
else {
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
}
 
switch (val) {
default:
case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
}
if (radeon_encoder->rmx_type == rmx_type)
return 0;
 
if ((rmx_type != DRM_MODE_SCALE_NONE) &&
(radeon_encoder->native_mode.clock == 0))
return 0;
 
radeon_encoder->rmx_type = rmx_type;
 
radeon_property_change_mode(&radeon_encoder->base);
}
 
return 0;
}
 
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
758,12 → 541,13
 
static int radeon_lvds_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int ret = 0;
struct drm_display_mode *mode;
 
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);
if (radeon_connector->ddc_bus) {
ret = radeon_ddc_get_modes(radeon_connector);
if (ret > 0) {
encoder = radeon_best_single_encoder(connector);
if (encoder) {
773,6 → 557,7
}
return ret;
}
}
 
encoder = radeon_best_single_encoder(connector);
if (!encoder)
841,9 → 626,16
}
 
/* check for edid as well */
radeon_connector_get_edid(connector);
if (radeon_connector->edid)
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
if (radeon_connector->edid)
ret = connector_status_connected;
}
}
/* check acpi lid status ??? */
 
radeon_connector_update_scratch_regs(connector, ret);
854,9 → 646,10
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
radeon_connector_free_edid(connector);
if (radeon_connector->edid)
kfree(radeon_connector->edid);
kfree(radeon_connector->con_priv);
drm_connector_unregister(connector);
// drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
913,13 → 706,11
 
static int radeon_vga_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
 
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);
ret = radeon_ddc_get_modes(radeon_connector);
 
radeon_get_native_mode(connector);
 
return ret;
}
 
956,27 → 747,29
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
radeon_connector_free_edid(connector);
radeon_connector_get_edid(connector);
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
 
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
drm_get_connector_name(connector));
ret = connector_status_connected;
} else {
radeon_connector->use_digital =
!!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
 
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
radeon_connector_free_edid(connector);
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
ret = connector_status_disconnected;
} else {
} else
ret = connector_status_connected;
}
}
} else {
 
/* if we aren't forcing don't do destructive polling */
985,8 → 778,9
* detected a monitor via load.
*/
if (radeon_connector->detected_by_load)
ret = connector->status;
goto out;
return connector->status;
else
return ret;
}
 
if (radeon_connector->dac_load_detect && encoder) {
1011,8 → 805,6
}
 
radeon_connector_update_scratch_regs(connector, ret);
 
out:
return ret;
}
 
1069,7 → 861,6
struct drm_encoder_helper_funcs *encoder_funcs;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
int r;
 
if (!radeon_connector->dac_load_detect)
return ret;
1101,6 → 892,15
.set_property = radeon_connector_set_property,
};
 
static int radeon_dvi_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
 
ret = radeon_ddc_get_modes(radeon_connector);
return ret;
}
 
static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
1141,33 → 941,32
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
struct drm_encoder_helper_funcs *encoder_funcs;
int i, r;
struct drm_mode_object *obj;
int i;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
 
if (!force && radeon_check_hpd_status_unchanged(connector))
return connector->status;
 
if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto exit;
}
 
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
radeon_connector_free_edid(connector);
radeon_connector_get_edid(connector);
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
 
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
drm_get_connector_name(connector));
/* rs690 seems to have a problem with connectors not existing and always
* returning a block of 0's. If we see this, just stop polling on this output */
if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) &&
radeon_connector->base.null_edid_counter) {
if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
connector->name);
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
radeon_connector->ddc_bus = NULL;
} else {
ret = connector_status_connected;
1174,18 → 973,18
broken_edid = true; /* defer use_digital to later */
}
} else {
radeon_connector->use_digital =
!!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
 
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
radeon_connector_free_edid(connector);
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
ret = connector_status_disconnected;
} else {
} else
ret = connector_status_connected;
}
 
/* This gets complicated. We have boards with VGA + HDMI with a
* shared DDC line and we have boards with DVI-D + HDMI with a shared
* DDC line. The latter is more complex because with DVI<->HDMI adapters
1205,7 → 1004,8
if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
/* hpd is our only option in this case */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
radeon_connector_free_edid(connector);
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
ret = connector_status_disconnected;
}
}
1239,11 → 1039,14
if (connector->encoder_ids[i] == 0)
break;
 
encoder = drm_encoder_find(connector->dev,
connector->encoder_ids[i]);
if (!encoder)
obj = drm_mode_object_find(connector->dev,
connector->encoder_ids[i],
DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
 
encoder = obj_to_encoder(obj);
 
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
1295,8 → 1098,6
 
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
 
exit:
return ret;
}
 
1305,6 → 1106,7
{
int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
1311,10 → 1113,12
if (connector->encoder_ids[i] == 0)
break;
 
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
if (!encoder)
obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
 
encoder = obj_to_encoder(obj);
 
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
1329,10 → 1133,15
 
/* then check use digital */
/* pick the first one */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
if (enc_id) {
obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
if (!obj)
return NULL;
encoder = obj_to_encoder(obj);
return encoder;
}
return NULL;
}
 
static void radeon_dvi_force(struct drm_connector *connector)
{
1363,16 → 1172,18
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
if (ASIC_IS_DCE6(rdev)) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
else
return MODE_OK;
} else {
} else
return MODE_CLOCK_HIGH;
} else
return MODE_CLOCK_HIGH;
}
}
 
/* check against the max pixel clock */
if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
1382,7 → 1193,7
}
 
static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
.get_modes = radeon_vga_get_modes,
.get_modes = radeon_dvi_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
1396,6 → 1207,21
.force = radeon_dvi_force,
};
 
static void radeon_dp_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
 
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
// drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
 
static int radeon_dp_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1411,8 → 1237,7
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);
ret = radeon_ddc_get_modes(radeon_connector);
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
1423,8 → 1248,7
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);
ret = radeon_ddc_get_modes(radeon_connector);
}
 
if (ret > 0) {
1457,10 → 1281,7
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);
 
radeon_get_native_mode(connector);
ret = radeon_ddc_get_modes(radeon_connector);
}
 
return ret;
1468,6 → 1289,7
 
u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
1476,10 → 1298,11
if (connector->encoder_ids[i] == 0)
break;
 
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
if (!encoder)
obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
 
encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
 
switch (radeon_encoder->encoder_id) {
1494,8 → 1317,9
return ENCODER_OBJECT_ID_NONE;
}
 
static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
1505,10 → 1329,11
if (connector->encoder_ids[i] == 0)
break;
 
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
if (!encoder)
obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
 
encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
1523,7 → 1348,7
struct radeon_device *rdev = dev->dev_private;
 
if (ASIC_IS_DCE5(rdev) &&
(rdev->clock.default_dispclk >= 53900) &&
(rdev->clock.dp_extclk >= 53900) &&
radeon_connector_encoder_is_hbr2(connector)) {
return true;
}
1540,16 → 1365,21
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
int r;
 
 
if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto out;
#if DISABLE_DP
connector->status = connector_status_disconnected;
return connector->status;
#endif
 
if (!force && radeon_check_hpd_status_unchanged(connector))
return connector->status;
 
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
 
radeon_connector_free_edid(connector);
 
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
if (encoder) {
1599,7 → 1429,7
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
/* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected;
}
1607,7 → 1437,6
}
 
radeon_connector_update_scratch_regs(connector, ret);
out:
return ret;
}
 
1614,8 → 1443,6
static int radeon_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
 
1646,25 → 1473,16
return MODE_PANEL;
}
}
return MODE_OK;
} else {
if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return radeon_dp_mode_valid_helper(connector, mode);
} else {
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
} else {
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
else
return MODE_OK;
}
}
}
 
return MODE_OK;
}
 
static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
.get_modes = radeon_dp_get_modes,
.mode_valid = radeon_dp_mode_valid,
1676,28 → 1494,10
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
.destroy = radeon_connector_destroy,
.destroy = radeon_dp_connector_destroy,
.force = radeon_dvi_force,
};
 
static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
 
static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
 
void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
1718,7 → 1518,6
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
bool is_dp_bridge = false;
bool has_aux = false;
 
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
return;
1790,11 → 1589,18
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
if (connector_type == DRM_MODE_CONNECTOR_eDP)
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
else
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (radeon_connector->ddc_bus)
has_aux = true;
else
if (!radeon_connector->ddc_bus)
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
switch (connector_type) {
1801,10 → 1607,6
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
drm_connector_init(dev, &radeon_connector->base,
&radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
radeon_connector->dac_load_detect = true;
1811,9 → 1613,6
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
1820,10 → 1619,6
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
drm_connector_init(dev, &radeon_connector->base,
&radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_property,
UNDERSCAN_OFF);
1833,20 → 1628,6
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
 
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
 
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
 
if (radeon_audio != 0)
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
 
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
1862,10 → 1643,6
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
drm_connector_init(dev, &radeon_connector->base,
&radeon_lvds_bridge_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base,
&radeon_dp_connector_helper_funcs);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
1888,10 → 1665,6
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
if (ASIC_IS_AVIVO(rdev))
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1910,10 → 1683,6
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
if (ASIC_IS_AVIVO(rdev))
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
1947,18 → 1716,7
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
1998,18 → 1756,7
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
2026,10 → 1773,12
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (radeon_connector->ddc_bus)
has_aux = true;
else
if (!radeon_connector->ddc_bus)
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
2046,18 → 1795,7
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
2068,13 → 1806,15
goto failed;
radeon_dig_connector->igp_lane_info = igp_lane_info;
radeon_connector->con_priv = radeon_dig_connector;
drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
if (i2c_bus->valid) {
/* add DP i2c bus */
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (radeon_connector->ddc_bus)
has_aux = true;
else
if (!radeon_connector->ddc_bus)
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_object_attach_property(&radeon_connector->base.base,
2131,11 → 1871,7
connector->polled = DRM_CONNECTOR_POLL_HPD;
 
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
 
if (has_aux)
radeon_dp_aux_init(radeon_connector);
 
drm_sysfs_connector_add(connector);
return;
 
failed:
2292,5 → 2028,5
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
drm_sysfs_connector_add(connector);
}
/drivers/video/drm/radeon/radeon_device.c
40,47 → 40,37
 
#include <drm/drm_pciids.h>
 
#define PCI_VENDOR_ID_ATI 0x1002
#define PCI_VENDOR_ID_APPLE 0x106b
 
int radeon_no_wb;
int radeon_no_wb = 1;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
int radeon_gart_size = -1; /* auto */
int radeon_gart_size = 512; /* default gart size */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_audio = -1;
int radeon_new_pll = -1;
int radeon_dynpm = -1;
int radeon_audio = 1;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = -1;
int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
int radeon_fastfb = 0;
int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
int radeon_vm_size = 8;
int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;
 
int irq_override = 0;
int radeon_bapm = -1;
 
 
extern display_t *os_display;
extern struct drm_device *main_device;
extern videomode_t usermode;
extern display_t *rdisplay;
struct drm_device *main_drm_device;
 
 
void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct drm_device *dev, videomode_t *usermode);
int init_display_kms(struct radeon_device *rdev, videomode_t *mode);
 
int get_modes(videomode_t *mode, u32_t *count);
int set_user_mode(videomode_t *mode);
155,68 → 145,9
"VERDE",
"OLAND",
"HAINAN",
"BONAIRE",
"KAVERI",
"KABINI",
"HAWAII",
"MULLINS",
"LAST",
};
 
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
struct radeon_px_quirk {
u32 chip_vendor;
u32 chip_device;
u32 subsys_vendor;
u32 subsys_device;
u32 px_quirk_flags;
};
 
static struct radeon_px_quirk radeon_px_quirk_list[] = {
/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
* https://bugzilla.kernel.org/show_bug.cgi?id=74551
*/
{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
};
 
bool radeon_is_px(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
 
if (rdev->flags & RADEON_IS_PX)
return true;
return false;
}
 
static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
struct radeon_px_quirk *p = radeon_px_quirk_list;
 
/* Apply PX quirks */
while (p && p->chip_device != 0) {
if (rdev->pdev->vendor == p->chip_vendor &&
rdev->pdev->device == p->chip_device &&
rdev->pdev->subsystem_vendor == p->subsys_vendor &&
rdev->pdev->subsystem_device == p->subsys_device) {
rdev->px_quirk_flags = p->px_quirk_flags;
break;
}
++p;
}
 
if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
rdev->flags &= ~RADEON_IS_PX;
}
 
/**
* radeon_program_register_sequence - program an array of registers.
*
253,11 → 184,6
}
}
 
void radeon_pci_config_reset(struct radeon_device *rdev)
{
pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
 
/**
* radeon_surface_init - Clear GPU surface registers.
*
272,9 → 198,6
int i;
 
for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
if (rdev->surface_regs[i].bo)
radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
else
radeon_clear_surface_reg(rdev, i);
}
/* enable surfaces */
353,87 → 276,6
}
 
/*
* GPU doorbell aperture helpers function.
*/
/**
* radeon_doorbell_init - Init doorbell driver information.
*
* @rdev: radeon_device pointer
*
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
static int radeon_doorbell_init(struct radeon_device *rdev)
{
/* doorbell bar mapping */
rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
 
rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
if (rdev->doorbell.num_doorbells == 0)
return -EINVAL;
 
rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
if (rdev->doorbell.ptr == NULL) {
return -ENOMEM;
}
DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
 
memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
 
return 0;
}
 
/**
* radeon_doorbell_fini - Tear down doorbell driver information.
*
* @rdev: radeon_device pointer
*
* Tear down doorbell driver information (CIK)
*/
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
iounmap(rdev->doorbell.ptr);
rdev->doorbell.ptr = NULL;
}
 
/**
* radeon_doorbell_get - Allocate a doorbell entry
*
* @rdev: radeon_device pointer
* @doorbell: doorbell index
*
* Allocate a doorbell for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
{
unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
if (offset < rdev->doorbell.num_doorbells) {
__set_bit(offset, rdev->doorbell.used);
*doorbell = offset;
return 0;
} else {
return -EINVAL;
}
}
 
/**
* radeon_doorbell_free - Free a doorbell entry
*
* @rdev: radeon_device pointer
* @doorbell: doorbell index
*
* Free a doorbell allocated for use by the driver (all asics)
*/
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
{
if (doorbell < rdev->doorbell.num_doorbells)
__clear_bit(doorbell, rdev->doorbell.used);
}
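/* The doorbell get/free pair above is a plain bitmap allocator:
 * find_first_zero_bit() scans the used[] bitmap for a free slot,
 * __set_bit() claims it and __clear_bit() releases it. A minimal
 * self-contained sketch of the same pattern in plain C; the names and the
 * 1024-slot size are illustrative, not the driver's.
 */
#include <stdint.h>

#define MAX_DOORBELLS 1024
static uint32_t db_used[MAX_DOORBELLS / 32]; /* one bit per slot */

static int doorbell_get_sketch(uint32_t *doorbell)
{
	uint32_t i;
	for (i = 0; i < MAX_DOORBELLS; i++) {
		if (!(db_used[i / 32] & (1u << (i % 32)))) {
			db_used[i / 32] |= 1u << (i % 32); /* claim the slot */
			*doorbell = i;
			return 0;
		}
	}
	return -1; /* no free slot; the driver returns -EINVAL */
}

static void doorbell_free_sketch(uint32_t doorbell)
{
	if (doorbell < MAX_DOORBELLS)
		db_used[doorbell / 32] &= ~(1u << (doorbell % 32));
}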
 
/*
* radeon_wb_*()
* Writeback is the method by which the GPU updates special pages
* in memory with the status of certain GPU events (fences, ring pointers,
464,11 → 306,6
{
radeon_wb_disable(rdev);
if (rdev->wb.wb_obj) {
if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
radeon_bo_kunmap(rdev->wb.wb_obj);
radeon_bo_unpin(rdev->wb.wb_obj);
radeon_bo_unreserve(rdev->wb.wb_obj);
}
radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
490,8 → 327,7
 
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->wb.wb_obj);
RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
782,11 → 618,15
{
if (rdev->dummy_page.page)
return 0;
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
rdev->dummy_page.page = (void*)AllocPage();
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->dummy_page.addr = MapIoMem((addr_t)rdev->dummy_page.page, 4096, 3);
if (!rdev->dummy_page.addr) {
// __free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
return -ENOMEM;
}
return 0;
}
 
801,7 → 641,7
{
if (rdev->dummy_page.page == NULL)
return;
 
KernelFree((void*)rdev->dummy_page.addr);
rdev->dummy_page.page = NULL;
}
 
1104,27 → 944,15
radeon_vram_limit = 0;
}
 
if (radeon_gart_size == -1) {
/* default to a larger gart size on newer asics */
if (rdev->family >= CHIP_RV770)
radeon_gart_size = 1024;
else
radeon_gart_size = 512;
}
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
dev_warn(rdev->dev, "gart size (%d) too small\n",
dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
radeon_gart_size);
if (rdev->family >= CHIP_RV770)
radeon_gart_size = 1024;
else
radeon_gart_size = 512;
 
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
if (rdev->family >= CHIP_RV770)
radeon_gart_size = 1024;
else
radeon_gart_size = 512;
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
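/* The radeon_check_pot_argument() test used above typically reduces to the
 * classic bit trick: x > 0 is a power of two exactly when (x & (x - 1)) is
 * zero. A hedged sketch of that check plus the MiB-to-byte shift (assumed
 * behavior, not quoted from the driver):
 */
#include <stdbool.h>
#include <stdint.h>

static bool is_pot(int x)
{
	return x > 0 && (x & (x - 1)) == 0; /* 512 -> true, 48 -> false */
}

static uint64_t gart_bytes(int gart_size_mb)
{
	return (uint64_t)gart_size_mb << 20; /* 512 << 20 == 512 MiB in bytes */
}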
1144,69 → 972,8
radeon_agpmode = 0;
break;
}
 
if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size);
radeon_vm_size = 4;
}
 
if (radeon_vm_size < 1) {
dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
radeon_vm_size);
radeon_vm_size = 4;
}
 
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
if (radeon_vm_size > 1024) {
dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
radeon_vm_size = 4;
}
 
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (radeon_vm_block_size == -1) {
 
/* Total bits covered by PD + PTs */
unsigned bits = ilog2(radeon_vm_size) + 18;
 
/* Make sure the PD is 4K in size up to 8GB address space.
Above that split equal between PD and PTs */
if (radeon_vm_size <= 8)
radeon_vm_block_size = bits - 9;
else
radeon_vm_block_size = (bits + 3) / 2;
 
} else if (radeon_vm_block_size < 9) {
dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
 
if (radeon_vm_block_size > 24 ||
(radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
}
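/* Worked case for the auto block size above, taking the default
 * radeon_vm_size of 8 (GB): bits = ilog2(8) + 18 = 21, and since 8 <= 8
 * the page directory stays 4K in size (9 bits of 8-byte entries), leaving
 * radeon_vm_block_size = 21 - 9 = 12 bits per page table. Together with
 * the 12-bit page offset that covers 9 + 12 + 12 = 33 bits = 8 GB.
 * A throwaway check of just this arithmetic:
 */
#include <assert.h>

static void vm_block_size_example(void)
{
	unsigned vm_size = 8;                 /* GB, the default above */
	unsigned bits = 3 /* ilog2(8) */ + 18;
	unsigned block = (vm_size <= 8) ? bits - 9 : (bits + 3) / 2;
	assert(block == 12);                  /* 9 PD + 12 PT + 12 offset bits */
}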
 
/**
* radeon_device_init - initialize the driver
*
* @rdev: radeon_device pointer
* @pdev: drm dev pointer
* @pdev: pci dev pointer
* @flags: driver flags
*
* Initializes the driver info and hw (all asics).
* Returns 0 for success or an error on failure.
* Called at driver startup.
*/
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
1214,10 → 981,8
{
int r, i;
int dma_bits;
bool runtime = false;
 
rdev->shutdown = false;
rdev->dev = &pdev->dev;
rdev->ddev = ddev;
rdev->pdev = pdev;
rdev->flags = flags;
1224,7 → 989,7
rdev->family = flags & RADEON_FAMILY_MASK;
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
rdev->mc.gtt_size = 512 * 1024 * 1024;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->accel_working = false;
/* set up ring ids */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
1243,7 → 1008,6
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
// init_rwsem(&rdev->pm.mclk_lock);
// init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
1250,17 → 1014,20
r = radeon_gem_init(rdev);
if (r)
return r;
 
radeon_check_arguments(rdev);
/* initialize vm here */
mutex_init(&rdev->vm_manager.lock);
/* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits.
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
rdev->vm_manager.max_pfn = radeon_vm_size << 18;
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r)
return r;
radeon_check_arguments(rdev);
 
/* all of the newer IGP chips have an internal gart
* However some rs4xx report as AGP, so remove that here.
1309,24 → 1076,8
/* Registers mapping */
/* TODO: block userspace mapping of io register */
spin_lock_init(&rdev->mmio_idx_lock);
spin_lock_init(&rdev->smc_idx_lock);
spin_lock_init(&rdev->pll_idx_lock);
spin_lock_init(&rdev->mc_idx_lock);
spin_lock_init(&rdev->pcie_idx_lock);
spin_lock_init(&rdev->pciep_idx_lock);
spin_lock_init(&rdev->pif_idx_lock);
spin_lock_init(&rdev->cg_idx_lock);
spin_lock_init(&rdev->uvd_idx_lock);
spin_lock_init(&rdev->rcu_idx_lock);
spin_lock_init(&rdev->didt_idx_lock);
spin_lock_init(&rdev->end_idx_lock);
if (rdev->family >= CHIP_BONAIRE) {
rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
} else {
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
}
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
if (rdev->rmmio == NULL) {
return -ENOMEM;
1334,10 → 1085,6
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
 
/* doorbell bar mapping */
if (rdev->family >= CHIP_BONAIRE)
radeon_doorbell_init(rdev);
 
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1349,20 → 1096,15
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
 
if (rdev->flags & RADEON_IS_PX)
radeon_device_handle_px_quirks(rdev);
if (rdev->flags & RADEON_IS_PX)
runtime = true;
 
r = radeon_init(rdev);
if (r)
return r;
 
r = radeon_ib_ring_tests(rdev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
// r = radeon_ib_ring_tests(rdev);
// if (r)
// DRM_ERROR("ib ring test failed (%d).\n", r);
 
 
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
1374,24 → 1116,14
if (r)
return r;
}
 
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
else
DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
}
if ((radeon_testing & 2)) {
if (rdev->accel_working)
radeon_test_syncing(rdev);
else
DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
}
// if (radeon_testing) {
// radeon_test_moves(rdev);
// }
// if ((radeon_testing & 2)) {
// radeon_test_syncing(rdev);
// }
if (radeon_benchmarking) {
if (rdev->accel_working)
radeon_benchmark(rdev, radeon_benchmarking);
else
DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
}
return 0;
}
1415,8 → 1147,6
int resched;
 
// down_write(&rdev->exclusive_lock);
rdev->needs_reset = false;
 
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
// resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1515,8 → 1245,6
* otherwise it should provide enough functionalities
* for shadowfb to run
*/
main_device = dev;
 
if( radeon_modeset )
{
r = radeon_modeset_init(rdev);
1523,16 → 1251,76
if (r) {
return r;
}
init_display_kms(dev, &usermode);
};
return 0;
}
 
videomode_t usermode;
 
 
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static struct drm_device *dev;
int ret;
 
dev = kzalloc(sizeof(*dev), 0);
if (!dev)
return -ENOMEM;
 
// ret = pci_enable_device(pdev);
// if (ret)
// goto err_g1;
 
// pci_set_master(pdev);
 
// if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
// printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
// goto err_g2;
// }
 
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
 
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->ctxlist);
INIT_LIST_HEAD(&dev->vmalist);
INIT_LIST_HEAD(&dev->maplist);
 
spin_lock_init(&dev->count_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
 
 
ret = radeon_driver_load_kms(dev, ent->driver_data );
if (ret)
goto err_g4;
 
main_drm_device = dev;
 
if( radeon_modeset )
init_display_kms(dev->dev_private, &usermode);
else
init_display(rdev, &usermode);
init_display(dev->dev_private, &usermode);
 
 
return 0;
}
 
err_g4:
// drm_put_minor(&dev->primary);
//err_g3:
// if (drm_core_check_feature(dev, DRIVER_MODESET))
// drm_put_minor(&dev->control);
//err_g2:
// pci_disable_device(pdev);
//err_g1:
free(dev);
 
LEAVE();
 
return ret;
}
 
resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
return pci_resource_start(dev->pdev, resource);
1577,85 → 1365,181
return rem;
}
 
 
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
 
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
 
#define CURRENT_API 0x0200 /* 2.00 */
#define COMPATIBLE_API 0x0100 /* 1.00 */
 
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
DRIVER_PRIME | DRIVER_RENDER,
.load = radeon_driver_load_kms,
// .open = radeon_driver_open_kms,
// .preclose = radeon_driver_preclose_kms,
// .postclose = radeon_driver_postclose_kms,
// .lastclose = radeon_driver_lastclose_kms,
// .unload = radeon_driver_unload_kms,
// .get_vblank_counter = radeon_get_vblank_counter_kms,
// .enable_vblank = radeon_enable_vblank_kms,
// .disable_vblank = radeon_disable_vblank_kms,
// .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
// .get_scanout_position = radeon_get_crtc_scanoutpos,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = radeon_debugfs_init,
.debugfs_cleanup = radeon_debugfs_cleanup,
#endif
.irq_preinstall = radeon_driver_irq_preinstall_kms,
.irq_postinstall = radeon_driver_irq_postinstall_kms,
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
// .ioctls = radeon_ioctls_kms,
// .gem_free_object = radeon_gem_object_free,
// .gem_open_object = radeon_gem_object_open,
// .gem_close_object = radeon_gem_object_close,
// .dumb_create = radeon_mode_dumb_create,
// .dumb_map_offset = radeon_mode_dumb_mmap,
// .dumb_destroy = drm_gem_dumb_destroy,
// .fops = &radeon_driver_kms_fops,
#define API_VERSION ((COMPATIBLE_API << 16) | CURRENT_API)
 
// .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
// .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
// .gem_prime_export = drm_gem_prime_export,
// .gem_prime_import = drm_gem_prime_import,
// .gem_prime_pin = radeon_gem_prime_pin,
// .gem_prime_unpin = radeon_gem_prime_unpin,
// .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
// .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
// .gem_prime_vmap = radeon_gem_prime_vmap,
// .gem_prime_vunmap = radeon_gem_prime_vunmap,
#define SRV_GETVERSION 0
#define SRV_ENUM_MODES 1
#define SRV_SET_MODE 2
#define SRV_GET_CAPS 3
 
#define SRV_CREATE_SURFACE 10
#define SRV_DESTROY_SURFACE 11
#define SRV_LOCK_SURFACE 12
#define SRV_UNLOCK_SURFACE 13
#define SRV_RESIZE_SURFACE 14
#define SRV_BLIT_BITMAP 15
#define SRV_BLIT_TEXTURE 16
#define SRV_BLIT_VIDEO 17
 
 
 
int r600_video_blit(uint64_t src_offset, int x, int y,
int w, int h, int pitch);
 
#define check_input(size) \
if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
break;
 
#define check_output(size) \
if( unlikely((outp==NULL)||(io->out_size != (size))) ) \
break;
 
int _stdcall display_handler(ioctl_t *io)
{
int retval = -1;
u32_t *inp;
u32_t *outp;
 
inp = io->input;
outp = io->output;
 
switch(io->io_code)
{
case SRV_GETVERSION:
check_output(4);
*outp = API_VERSION;
retval = 0;
break;
 
case SRV_ENUM_MODES:
dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
inp, io->inp_size, io->out_size );
check_output(4);
if( radeon_modeset)
retval = get_modes((videomode_t*)inp, outp);
break;
 
case SRV_SET_MODE:
dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
inp, io->inp_size);
check_input(sizeof(videomode_t));
if( radeon_modeset )
retval = set_user_mode((videomode_t*)inp);
break;
/*
case SRV_GET_CAPS:
retval = get_driver_caps((hwcaps_t*)inp);
break;
 
case SRV_CREATE_SURFACE:
// check_input(8);
retval = create_surface(main_drm_device, (struct io_call_10*)inp);
break;
 
case SRV_LOCK_SURFACE:
retval = lock_surface((struct io_call_12*)inp);
break;
 
case SRV_BLIT_BITMAP:
srv_blit_bitmap( inp[0], inp[1], inp[2],
inp[3], inp[4], inp[5], inp[6]);
*/
};
 
int ati_init(void)
return retval;
}
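/* display_handler() is the service entry point registered as "DISPLAY"
 * below: the caller fills an ioctl_t with an io_code plus input/output
 * buffers, and the handler validates the sizes before dispatching. A
 * hedged sketch of a version query; the field order of ioctl_t is assumed
 * from the members used above, not quoted from a header:
 */
typedef struct {
	unsigned io_code;
	void *input;
	int inp_size;
	void *output;
	int out_size;
} ioctl_t; /* assumed layout, inferred from the fields used above */

int _stdcall display_handler(ioctl_t *io);

static int query_api_version(unsigned *version)
{
	ioctl_t io = {
		.io_code  = 0,        /* SRV_GETVERSION */
		.output   = version,
		.out_size = 4,        /* check_output(4) wants exactly 4 bytes */
	};
	return display_handler(&io); /* 0 on success, -1 otherwise */
}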
 
static char log[256];
static pci_dev_t device;
 
u32_t drvEntry(int action, char *cmdline)
{
static pci_dev_t device;
struct radeon_device *rdev = NULL;
 
const struct pci_device_id *ent;
 
int err;
u32_t retval = 0;
 
if(action != 1)
return 0;
 
if( GetService("DISPLAY") != 0 )
return 0;
 
if( cmdline && *cmdline )
parse_cmdline(cmdline, &usermode, log, &radeon_modeset);
 
if(!dbg_open(log))
{
strcpy(log, "/TMP1/1/ati.log");
 
if(!dbg_open(log))
{
printf("Can't open %s\nExit\n", log);
return 0;
};
}
dbgprintf("Radeon v3.10 preview-1 cmdline %s\n", cmdline);
 
cpu_detect();
 
enum_pci_devices();
 
ent = find_pci_device(&device, pciidlist);
 
if( unlikely(ent == NULL) )
{
dbgprintf("device not found\n");
return -ENODEV;
return 0;
};
 
drm_core_init();
 
DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
dbgprintf("device %x:%x\n", device.pci_dev.vendor,
device.pci_dev.device);
 
kms_driver.driver_features |= DRIVER_MODESET;
drm_global_init();
 
err = drm_get_pci_dev(&device.pci_dev, ent, &kms_driver);
err = drm_get_dev(&device.pci_dev, ent);
 
rdev = rdisplay->ddev->dev_private;
 
err = RegService("DISPLAY", display_handler);
 
if( err != 0)
dbgprintf("Set DISPLAY handler\n");
 
return err;
};
 
#define PCI_CLASS_REVISION 0x08
#define PCI_CLASS_DISPLAY_VGA 0x0300
 
int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
{
u16_t vendor, device;
u32_t class;
int ret = 0;
 
vendor = id & 0xffff;
device = (id >> 16) & 0xffff;
 
if(vendor == 0x1002)
{
class = PciRead32(busnr, devfn, PCI_CLASS_REVISION);
class >>= 16;
 
if( class == PCI_CLASS_DISPLAY_VGA)
ret = 1;
}
return ret;
}
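/* pci_scan_filter() keys off config offset 0x08, which packs class code and
 * revision as 0xBBSSPPRR (base class, subclass, prog-if, revision);
 * shifting right by 16 keeps base class plus subclass. For a VGA controller
 * the dword might read 0x03000002, and 0x03000002 >> 16 == 0x0300 ==
 * PCI_CLASS_DISPLAY_VGA. A throwaway check with an illustrative revision
 * byte:
 */
#include <assert.h>
#include <stdint.h>

static void class_decode_example(void)
{
	uint32_t class_rev = 0x03000002; /* VGA controller, revision 0x02 */
	assert((class_rev >> 16) == PCI_CLASS_DISPLAY_VGA);
}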
 
 
/drivers/video/drm/radeon/radeon_display.c
33,23 → 33,6
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
 
/* Greatest common divisor */
unsigned long gcd(unsigned long a, unsigned long b)
{
unsigned long r;
 
if (a < b)
swap(a, b);
 
if (!b)
return a;
while ((r = a % b) != 0) {
a = b;
b = r;
}
return b;
}
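/* gcd() above is Euclid's algorithm by repeated remainder. For example,
 * gcd(36, 24) walks 36 % 24 = 12, then 24 % 12 = 0, and returns 12.
 * A throwaway check:
 */
#include <assert.h>

static void gcd_example(void)
{
	assert(gcd(36, 24) == 12); /* 36 % 24 = 12; 24 % 12 = 0 */
	assert(gcd(24, 36) == 12); /* arguments get swapped so a >= b */
	assert(gcd(7, 0) == 7);    /* b == 0 returns a directly */
}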
 
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
80,8 → 63,7
(radeon_crtc->lut_b[i] << 0));
}
 
/* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
 
static void dce4_crtc_load_lut(struct drm_crtc *crtc)
171,14 → 153,8
NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
if (ASIC_IS_DCE8(rdev)) {
/* XXX this only needs to be programmed once per crtc at startup,
* not sure where the best place for it is
*/
WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
CIK_CURSOR_ALPHA_BLND_ENA);
 
}
}
 
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
267,51 → 243,11
kfree(radeon_crtc);
}
 
static int
radeon_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct radeon_device *rdev;
struct drm_crtc *crtc;
bool active = false;
int ret;
 
if (!set || !set->crtc)
return -EINVAL;
 
dev = set->crtc->dev;
 
ret = drm_crtc_helper_set_config(set);
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (crtc->enabled)
active = true;
 
// pm_runtime_mark_last_busy(dev->dev);
 
rdev = dev->dev_private;
/* if we have active crtcs and we don't have a power ref,
take the current one */
if (active && !rdev->have_disp_power_ref) {
rdev->have_disp_power_ref = true;
return ret;
}
/* if we have no active crtcs, then drop the power ref
we got before */
if (!active && rdev->have_disp_power_ref) {
// pm_runtime_put_autosuspend(dev->dev);
rdev->have_disp_power_ref = false;
}
 
/* drop the power reference we got coming in here */
// pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static const struct drm_crtc_funcs radeon_crtc_funcs = {
.cursor_set = NULL,
.cursor_move = NULL,
.gamma_set = radeon_crtc_gamma_set,
.set_config = radeon_crtc_set_config,
.set_config = drm_crtc_helper_set_config,
.destroy = radeon_crtc_destroy,
.page_flip = NULL,
};
332,16 → 268,6
radeon_crtc->crtc_id = index;
rdev->mode_info.crtcs[index] = radeon_crtc;
 
if (rdev->family >= CHIP_BONAIRE) {
radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
} else {
radeon_crtc->max_cursor_width = CURSOR_WIDTH;
radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
}
dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
 
#if 0
radeon_crtc->mode_set.crtc = &radeon_crtc->base;
radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
360,7 → 286,7
radeon_legacy_init_crtc(dev, radeon_crtc);
}
 
static const char *encoder_names[38] = {
static const char *encoder_names[37] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
397,8 → 323,7
"INTERNAL_UNIPHY2",
"NUTMEG",
"TRAVIS",
"INTERNAL_VCE",
"INTERNAL_UNIPHY3",
"INTERNAL_VCE"
};
 
static const char *hpd_names[6] = {
423,7 → 348,7
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector->name);
DRM_INFO(" %s\n", drm_get_connector_name(connector));
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
513,227 → 438,175
return ret;
}
 
/* avivo */
 
/**
* avivo_reduce_ratio - fractional number reduction
*
* @nom: nominator
* @den: denominator
* @nom_min: minimum value for nominator
* @den_min: minimum value for denominator
*
* Find the greatest common divisor and apply it on both nominator and
* denominator, but make sure nominator and denominator are at least as large
* as their minimum values.
*/
static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
unsigned nom_min, unsigned den_min)
int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
unsigned tmp;
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
int ret = 0;
 
/* reduce the numbers to a simpler ratio */
tmp = gcd(*nom, *den);
*nom /= tmp;
*den /= tmp;
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
 
/* make sure nominator is large enough */
if (*nom < nom_min) {
tmp = DIV_ROUND_UP(nom_min, *nom);
*nom *= tmp;
*den *= tmp;
if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
ENCODER_OBJECT_ID_NONE) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
if (dig->dp_i2c_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&dig->dp_i2c_bus->adapter);
} else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&dig->dp_i2c_bus->adapter);
else if (radeon_connector->ddc_bus && !radeon_connector->edid)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
} else {
if (radeon_connector->ddc_bus && !radeon_connector->edid)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
}
 
/* make sure the denominator is large enough */
if (*den < den_min) {
tmp = DIV_ROUND_UP(den_min, *den);
*nom *= tmp;
*den *= tmp;
if (!radeon_connector->edid) {
if (rdev->is_atom_bios) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
} else
/* some servers provide a hardcoded edid in rom for KVMs */
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
return 0;
}
 
/**
* avivo_get_fb_ref_div - feedback and ref divider calculation
*
* @nom: nominator
* @den: denominator
* @post_div: post divider
* @fb_div_max: feedback divider maximum
* @ref_div_max: reference divider maximum
* @fb_div: resulting feedback divider
* @ref_div: resulting reference divider
*
* Calculate feedback and reference divider for a given post divider. Makes
* sure we stay within the limits.
*/
static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned fb_div_max, unsigned ref_div_max,
unsigned *fb_div, unsigned *ref_div)
/* avivo */
static void avivo_get_fb_div(struct radeon_pll *pll,
u32 target_clock,
u32 post_div,
u32 ref_div,
u32 *fb_div,
u32 *frac_fb_div)
{
/* limit reference * post divider to a maximum */
ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
u32 tmp = post_div * ref_div;
 
/* get matching reference and feedback divider */
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
tmp *= target_clock;
*fb_div = tmp / pll->reference_freq;
*frac_fb_div = tmp % pll->reference_freq;
 
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
*fb_div = fb_div_max;
if (*fb_div > pll->max_feedback_div)
*fb_div = pll->max_feedback_div;
else if (*fb_div < pll->min_feedback_div)
*fb_div = pll->min_feedback_div;
}
}
 
/**
* radeon_compute_pll_avivo - compute PLL parameters
*
* @pll: information about the PLL
* @dot_clock_p: resulting pixel clock
* @fb_div_p: resulting feedback divider
* @frac_fb_div_p: fractional part of the feedback divider
* @ref_div_p: resulting reference divider
* @post_div_p: resulting post divider
*
* Try to calculate the PLL parameters to generate the given frequency:
* dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
*/
void radeon_compute_pll_avivo(struct radeon_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,
u32 *frac_fb_div_p,
u32 *ref_div_p,
u32 *post_div_p)
static u32 avivo_get_post_div(struct radeon_pll *pll,
u32 target_clock)
{
unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
freq : freq / 10;
u32 vco, post_div, tmp;
 
unsigned fb_div_min, fb_div_max, fb_div;
unsigned post_div_min, post_div_max, post_div;
unsigned ref_div_min, ref_div_max, ref_div;
unsigned post_div_best, diff_best;
unsigned nom, den;
if (pll->flags & RADEON_PLL_USE_POST_DIV)
return pll->post_div;
 
/* determine allowed feedback divider range */
fb_div_min = pll->min_feedback_div;
fb_div_max = pll->max_feedback_div;
 
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
fb_div_min *= 10;
fb_div_max *= 10;
}
 
/* determine allowed ref divider range */
if (pll->flags & RADEON_PLL_USE_REF_DIV)
ref_div_min = pll->reference_div;
if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
if (pll->flags & RADEON_PLL_IS_LCD)
vco = pll->lcd_pll_out_min;
else
ref_div_min = pll->min_ref_div;
 
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
pll->flags & RADEON_PLL_USE_REF_DIV)
ref_div_max = pll->reference_div;
vco = pll->pll_out_min;
} else {
if (pll->flags & RADEON_PLL_IS_LCD)
vco = pll->lcd_pll_out_max;
else
ref_div_max = pll->max_ref_div;
vco = pll->pll_out_max;
}
 
/* determine allowed post divider range */
if (pll->flags & RADEON_PLL_USE_POST_DIV) {
post_div_min = pll->post_div;
post_div_max = pll->post_div;
} else {
unsigned vco_min, vco_max;
post_div = vco / target_clock;
tmp = vco % target_clock;
 
if (pll->flags & RADEON_PLL_IS_LCD) {
vco_min = pll->lcd_pll_out_min;
vco_max = pll->lcd_pll_out_max;
if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
if (tmp)
post_div++;
} else {
vco_min = pll->pll_out_min;
vco_max = pll->pll_out_max;
if (!tmp)
post_div--;
}
 
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
vco_min *= 10;
vco_max *= 10;
}
if (post_div > pll->max_post_div)
post_div = pll->max_post_div;
else if (post_div < pll->min_post_div)
post_div = pll->min_post_div;
 
post_div_min = vco_min / target_clock;
if ((target_clock * post_div_min) < vco_min)
++post_div_min;
if (post_div_min < pll->min_post_div)
post_div_min = pll->min_post_div;
 
post_div_max = vco_max / target_clock;
if ((target_clock * post_div_max) > vco_max)
--post_div_max;
if (post_div_max > pll->max_post_div)
post_div_max = pll->max_post_div;
return post_div;
}
 
/* represent the searched ratio as fractional number */
nom = target_clock;
den = pll->reference_freq;
#define MAX_TOLERANCE 10
 
/* reduce the numbers to a simpler ratio */
avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);
void radeon_compute_pll_avivo(struct radeon_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,
u32 *frac_fb_div_p,
u32 *ref_div_p,
u32 *post_div_p)
{
u32 target_clock = freq / 10;
u32 post_div = avivo_get_post_div(pll, target_clock);
u32 ref_div = pll->min_ref_div;
u32 fb_div = 0, frac_fb_div = 0, tmp;
 
/* now search for a post divider */
if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
post_div_best = post_div_min;
else
post_div_best = post_div_max;
diff_best = ~0;
if (pll->flags & RADEON_PLL_USE_REF_DIV)
ref_div = pll->reference_div;
 
for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
unsigned diff;
avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
ref_div_max, &fb_div, &ref_div);
diff = abs(target_clock - (pll->reference_freq * fb_div) /
(ref_div * post_div));
 
if (diff < diff_best || (diff == diff_best &&
!(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
 
post_div_best = post_div;
diff_best = diff;
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
if (frac_fb_div >= 5) {
frac_fb_div -= 5;
frac_fb_div = frac_fb_div / 10;
frac_fb_div++;
}
if (frac_fb_div >= 10) {
fb_div++;
frac_fb_div = 0;
}
post_div = post_div_best;
} else {
while (ref_div <= pll->max_ref_div) {
avivo_get_fb_div(pll, target_clock, post_div, ref_div,
&fb_div, &frac_fb_div);
if (frac_fb_div >= (pll->reference_freq / 2))
fb_div++;
frac_fb_div = 0;
tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
tmp = (tmp * 10000) / target_clock;
 
/* get the feedback and reference divider for the optimal value */
avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
&fb_div, &ref_div);
 
/* reduce the numbers to a simpler ratio once more */
/* this also makes sure that the reference divider is large enough */
avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
 
/* avoid high jitter with small fractional dividers */
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
if (fb_div < fb_div_min) {
unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
fb_div *= tmp;
ref_div *= tmp;
if (tmp > (10000 + MAX_TOLERANCE))
ref_div++;
else if (tmp >= (10000 - MAX_TOLERANCE))
break;
else
ref_div++;
}
}
 
/* and finally save the result */
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
*fb_div_p = fb_div / 10;
*frac_fb_div_p = fb_div % 10;
} else {
*dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
(ref_div * post_div * 10);
*fb_div_p = fb_div;
*frac_fb_div_p = 0;
}
 
*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
(pll->reference_freq * *frac_fb_div_p)) /
(ref_div * post_div * 10);
*frac_fb_div_p = frac_fb_div;
*ref_div_p = ref_div;
*post_div_p = post_div;
 
DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
ref_div, post_div);
DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
*dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
}
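/* Whichever path computes the dividers, the result honors the relation
 * from the comment above:
 *   dot_clock = (ref_freq * fb_div) / (ref_div * post_div)
 * Round-number case (values picked for the arithmetic, not from any ASIC
 * table): ref_freq 2700, fb_div 120, ref_div 3, post_div 4 gives
 * 2700 * 120 / 12 = 27000. A throwaway check:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pll_dot_clock(uint32_t ref_freq, uint32_t fb_div,
			      uint32_t ref_div, uint32_t post_div)
{
	return (ref_freq * fb_div) / (ref_div * post_div);
}

static void pll_example(void)
{
	assert(pll_dot_clock(2700, 120, 3, 4) == 27000);
}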
 
/* pre-avivo */
936,9 → 809,6
{
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
 
if (radeon_fb->obj) {
drm_gem_object_unreference_unlocked(radeon_fb->obj);
}
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
1004,18 → 874,6
{ UNDERSCAN_AUTO, "auto" },
};
 
static struct drm_prop_enum_list radeon_audio_enum_list[] =
{ { RADEON_AUDIO_DISABLE, "off" },
{ RADEON_AUDIO_ENABLE, "on" },
{ RADEON_AUDIO_AUTO, "auto" },
};
 
/* XXX support different dither options? spatial, temporal, both, etc. */
static struct drm_prop_enum_list radeon_dither_enum_list[] =
{ { RADEON_FMT_DITHER_DISABLE, "off" },
{ RADEON_FMT_DITHER_ENABLE, "on" },
};
 
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
1066,18 → 924,6
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
 
sz = ARRAY_SIZE(radeon_audio_enum_list);
rdev->mode_info.audio_property =
drm_property_create_enum(rdev->ddev, 0,
"audio",
radeon_audio_enum_list, sz);
 
sz = ARRAY_SIZE(radeon_dither_enum_list);
rdev->mode_info.dither_property =
drm_property_create_enum(rdev->ddev, 0,
"dither",
radeon_dither_enum_list, sz);
 
return 0;
}
 
1111,43 → 957,43
for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
rdev->mode_info.afmt[i] = NULL;
 
if (ASIC_IS_NODCE(rdev)) {
/* nothing to do */
if (ASIC_IS_DCE6(rdev)) {
/* todo */
} else if (ASIC_IS_DCE4(rdev)) {
static uint32_t eg_offsets[] = {
EVERGREEN_CRTC0_REGISTER_OFFSET,
EVERGREEN_CRTC1_REGISTER_OFFSET,
EVERGREEN_CRTC2_REGISTER_OFFSET,
EVERGREEN_CRTC3_REGISTER_OFFSET,
EVERGREEN_CRTC4_REGISTER_OFFSET,
EVERGREEN_CRTC5_REGISTER_OFFSET,
0x13830 - 0x7030,
};
int num_afmt;
 
/* DCE8 has 7 audio blocks tied to DIG encoders */
/* DCE6 has 6 audio blocks tied to DIG encoders */
/* DCE4/5 has 6 audio blocks tied to DIG encoders */
/* DCE4.1 has 2 audio blocks tied to DIG encoders */
if (ASIC_IS_DCE8(rdev))
num_afmt = 7;
else if (ASIC_IS_DCE6(rdev))
num_afmt = 6;
else if (ASIC_IS_DCE5(rdev))
num_afmt = 6;
else if (ASIC_IS_DCE41(rdev))
num_afmt = 2;
else /* DCE4 */
num_afmt = 6;
 
BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
for (i = 0; i < num_afmt; i++) {
rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[i]) {
rdev->mode_info.afmt[i]->offset = eg_offsets[i];
rdev->mode_info.afmt[i]->id = i;
rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[0]) {
rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
rdev->mode_info.afmt[0]->id = 0;
}
rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[1]) {
rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
rdev->mode_info.afmt[1]->id = 1;
}
if (!ASIC_IS_DCE41(rdev)) {
rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[2]) {
rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
rdev->mode_info.afmt[2]->id = 2;
}
rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[3]) {
rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
rdev->mode_info.afmt[3]->id = 3;
}
rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[4]) {
rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
rdev->mode_info.afmt[4]->id = 4;
}
rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[5]) {
rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
rdev->mode_info.afmt[5]->id = 5;
}
}
} else if (ASIC_IS_DCE3(rdev)) {
/* DCE3.x has 2 audio blocks tied to DIG encoders */
rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
1193,8 → 1039,6
int i;
int ret;
 
ENTER();
 
drm_mode_config_init(rdev->ddev);
rdev->mode_info.mode_config_initialized = true;
 
1251,12 → 1095,14
// radeon_hpd_init(rdev);
 
/* setup afmt */
radeon_afmt_init(rdev);
// radeon_afmt_init(rdev);
 
/* Initialize power management */
// radeon_pm_init(rdev);
 
radeon_fbdev_init(rdev);
// drm_kms_helper_poll_init(rdev->ddev);
 
LEAVE();
 
return 0;
}
 
1335,7 → 1181,7
(!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
drm_detect_hdmi_monitor(radeon_connector->edid) &&
is_hdtv_mode(mode)))) {
if (radeon_encoder->underscan_hborder != 0)
radeon_crtc->h_border = radeon_encoder->underscan_hborder;
1381,18 → 1227,12
}
 
/*
* Retrieve current video scanout position of crtc on a given gpu, and
* an optional accurate timestamp of when query happened.
* Retrieve current video scanout position of crtc on a given gpu.
*
* \param dev Device to query.
* \param crtc Crtc to query.
* \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
* \param *stime Target location for timestamp taken immediately before
* scanout position query. Can be NULL to skip timestamp.
* \param *etime Target location for timestamp taken immediately after
* scanout position query. Can be NULL to skip timestamp.
*
* Returns vpos as a positive number while in active scanout area.
* Returns vpos as a negative number inside vblank, counting the number
1408,8 → 1248,7
* unknown small number of scanlines wrt. real scanout position.
*
*/
int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags,
int *vpos, int *hpos, void *stime, void *etime)
int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
1539,27 → 1378,5
if (in_vbl)
ret |= DRM_SCANOUTPOS_INVBL;
 
/* Is vpos outside nominal vblank area, but less than
* 1/100 of a frame height away from start of vblank?
* If so, assume this isn't a massively delayed vblank
* interrupt, but a vblank interrupt that fired a few
* microseconds before true start of vblank. Compensate
* by adding a full frame duration to the final timestamp.
* Happens, e.g., on ATI R500, R600.
*
* We only do this if DRM_CALLED_FROM_VBLIRQ.
*/
if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) {
vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
 
if (vbl_start - *vpos < vtotal / 100) {
*vpos -= vtotal;
 
/* Signal this correction as "applied". */
ret |= 0x8;
}
}
 
return ret;
}
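/* The return value above is a flag mask rather than an error code: bit 0
 * (DRM_SCANOUTPOS_VALID) means the query worked and bit 1
 * (DRM_SCANOUTPOS_INVBL) means the CRTC is inside vblank; the extra 0x8
 * "correction applied" marker exists only in the timestamp-aware variant.
 * A hedged decoding sketch, with flag values assumed to match the drm
 * headers of this kernel generation:
 */
#define DRM_SCANOUTPOS_VALID    (1 << 0)
#define DRM_SCANOUTPOS_INVBL    (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)

static const char *scanout_state(int ret)
{
	if (!(ret & DRM_SCANOUTPOS_VALID))
		return "query failed";
	return (ret & DRM_SCANOUTPOS_INVBL) ? "in vblank" : "in active scanout";
}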
/drivers/video/drm/radeon/radeon_family.h
93,11 → 93,6
CHIP_VERDE,
CHIP_OLAND,
CHIP_HAINAN,
CHIP_BONAIRE,
CHIP_KAVERI,
CHIP_KABINI,
CHIP_HAWAII,
CHIP_MULLINS,
CHIP_LAST,
};
 
116,7 → 111,6
RADEON_NEW_MEMMAP = 0x00400000UL,
RADEON_IS_PCI = 0x00800000UL,
RADEON_IS_IGPGART = 0x01000000UL,
RADEON_IS_PX = 0x02000000UL,
};
 
#endif
/drivers/video/drm/radeon/radeon_fence.c
37,7 → 37,6
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"
 
/*
* Fences
121,7 → 120,7
(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
radeon_fence_ring_emit(rdev, ring, *fence);
trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
// trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
return 0;
}
 
190,9 → 189,11
}
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 
if (wake)
if (wake) {
rdev->fence_drv[ring].last_activity = GetTimerTicks();
wake_up_all(&rdev->fence_queue);
}
}
 
/**
* radeon_fence_destroy - destroy a fence
210,13 → 211,13
}
 
/**
* radeon_fence_seq_signaled - check if a fence sequence number has signaled
* radeon_fence_seq_signaled - check if a fence sequeuce number has signaled
*
* @rdev: radeon device pointer
* @seq: sequence number
* @ring: ring index the fence is associated with
*
* Check if the last signaled fence sequence number is >= the requested
* Check if the last singled fence sequence number is >= the requested
* sequence number (all asics).
* Returns true if the fence has signaled (current fence value
* is >= requested value) or false if it has not (current fence
261,124 → 262,115
}
 
/**
* radeon_fence_any_seq_signaled - check if any sequence number is signaled
* radeon_fence_wait_seq - wait for a specific sequence number
*
* @rdev: radeon device pointer
* @seq: sequence numbers
*
* Check if the last signaled fence sequence number is >= the requested
* sequence number (all asics).
* Returns true if any has signaled (current value is >= requested value)
* or false if it has not. Helper function for radeon_fence_wait_seq.
*/
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
unsigned i;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
return true;
}
return false;
}
 
/**
* radeon_fence_wait_seq - wait for a specific sequence numbers
*
* @rdev: radeon device pointer
* @target_seq: sequence number(s) we want to wait for
* @target_seq: sequence number we want to wait for
* @ring: ring index the fence is associated with
* @intr: use interruptible sleep
* @lock_ring: whether the ring should be locked or not
*
* Wait for the requested sequence number(s) to be written by any ring
* (all asics). Sequence number array is indexed by ring id.
* Wait for the requested sequence number to be written (all asics).
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the sequence number. Helper function
* for radeon_fence_wait_*().
* for radeon_fence_wait(), et al.
* Returns 0 if the sequence number has passed, error for all other cases.
* -EDEADLK is returned when a GPU lockup has been detected.
* -EDEADLK is returned when a GPU lockup has been detected and the ring is
* marked as not ready so no further jobs get scheduled until a successful
* reset.
*/
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
bool intr)
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
unsigned ring, bool intr, bool lock_ring)
{
uint64_t last_seq[RADEON_NUM_RINGS];
unsigned long timeout, last_activity;
uint64_t seq;
unsigned i;
bool signaled;
int i, r;
int r;
 
while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
if (!rdev->ring[ring].ready) {
return -EBUSY;
}
 
/* Save current sequence values, used to check for GPU lockups */
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
continue;
 
last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
radeon_irq_kms_sw_irq_get(rdev, i);
timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
/* the normal case, timeout is somewhere before last_activity */
timeout = rdev->fence_drv[ring].last_activity - timeout;
} else {
/* either jiffies wrapped around, or no fence was signaled in the last 500ms
* anyway we will just wait for the minimum amount and then check for a lockup
*/
timeout = 1;
}
seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
/* Save current last activity value, used to check for GPU lockups */
last_activity = rdev->fence_drv[ring].last_activity;
 
// trace_radeon_fence_wait_begin(rdev->ddev, seq);
radeon_irq_kms_sw_irq_get(rdev, ring);
if (intr) {
r = wait_event_interruptible_timeout(rdev->fence_queue, (
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
|| rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
r = wait_event_interruptible_timeout(rdev->fence_queue,
(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
timeout);
} else {
r = wait_event_timeout(rdev->fence_queue, (
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
|| rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
r = wait_event_timeout(rdev->fence_queue,
(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
timeout);
}
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
continue;
 
radeon_irq_kms_sw_irq_put(rdev, i);
trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
radeon_irq_kms_sw_irq_put(rdev, ring);
if (unlikely(r < 0)) {
return r;
}
// trace_radeon_fence_wait_end(rdev->ddev, seq);
 
if (unlikely(r < 0))
return r;
 
if (unlikely(!signaled)) {
if (rdev->needs_reset)
return -EDEADLK;
 
/* we were interrupted for some reason and fence
* isn't signaled yet, resume waiting */
if (r)
if (r) {
continue;
}
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
/* check if sequence value has changed since last_activity */
if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
continue;
}
 
if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
break;
if (lock_ring) {
mutex_lock(&rdev->ring_lock);
}
 
if (i != RADEON_NUM_RINGS)
/* test if somebody else has already decided that this is a lockup */
if (last_activity != rdev->fence_drv[ring].last_activity) {
if (lock_ring) {
mutex_unlock(&rdev->ring_lock);
}
continue;
}
 
if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
/* good news we believe it's a lockup */
dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
target_seq, seq);
 
/* change last activity so nobody else thinks there is a lockup */
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
continue;
rdev->fence_drv[i].last_activity = GetTimerTicks();
}
 
if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
break;
/* mark the ring as not ready any more */
rdev->ring[ring].ready = false;
if (lock_ring) {
mutex_unlock(&rdev->ring_lock);
}
 
if (i < RADEON_NUM_RINGS) {
/* good news we believe it's a lockup */
dev_warn(rdev->dev, "GPU lockup (waiting for "
"0x%016llx last fence id 0x%016llx on"
" ring %d)\n",
target_seq[i], last_seq[i], i);
 
/* remember that we need a reset */
rdev->needs_reset = true;
wake_up_all(&rdev->fence_queue);
return -EDEADLK;
}
 
if (lock_ring) {
mutex_unlock(&rdev->ring_lock);
}
}
}
return 0;
}
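/* Both interleaved variants above share one wait-loop shape: snapshot the
 * last signaled sequence, sleep on fence_queue with a timeout, and if the
 * wakeup was a timeout with no sequence progress, suspect a GPU lockup.
 * A minimal sketch of that shape; the three extern helpers are plain-C
 * stand-ins for the kernel primitives, i.e. assumptions:
 */
#include <stdbool.h>
#include <stdint.h>

extern uint64_t read_last_seq(void);          /* atomic64_read(...last_seq) */
extern bool wait_queue_timeout(unsigned ms);  /* true if woken before timeout */
extern bool ring_is_locked_up(void);          /* radeon_ring_is_lockup(...) */

static int wait_seq_sketch(uint64_t target_seq)
{
	while (read_last_seq() < target_seq) {
		uint64_t before = read_last_seq();
		if (wait_queue_timeout(500))
			continue;            /* woken: re-check the condition */
		if (read_last_seq() != before)
			continue;            /* progress was made, keep waiting */
		if (ring_is_locked_up())
			return -1;           /* the driver returns -EDEADLK */
	}
	return 0;
}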
 
395,7 → 387,6
*/
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
int r;
 
if (fence == NULL) {
403,15 → 394,147
return -EINVAL;
}
 
seq[fence->ring] = fence->seq;
if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
r = radeon_fence_wait_seq(fence->rdev, fence->seq,
fence->ring, intr, true);
if (r) {
return r;
}
fence->seq = RADEON_FENCE_SIGNALED_SEQ;
return 0;
}
 
r = radeon_fence_wait_seq(fence->rdev, seq, intr);
if (r)
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
unsigned i;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
return true;
}
}
return false;
}
 
/**
* radeon_fence_wait_any_seq - wait for a sequence number on any ring
*
* @rdev: radeon device pointer
* @target_seq: sequence number(s) we want to wait for
* @intr: use interruptible sleep
*
* Wait for the requested sequence number(s) to be written by any ring
* (all asics). Sequence number array is indexed by ring id.
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the sequence number. Helper function
* for radeon_fence_wait_any(), et al.
* Returns 0 if the sequence number has passed, error for all other cases.
*/
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
u64 *target_seq, bool intr)
{
unsigned long timeout, last_activity, tmp;
unsigned i, ring = RADEON_NUM_RINGS;
bool signaled;
int r;
 
for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i]) {
continue;
}
 
/* use the most recent one as indicator */
if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
last_activity = rdev->fence_drv[i].last_activity;
}
 
/* For lockup detection just pick the lowest ring we are
* actively waiting for
*/
if (i < ring) {
ring = i;
}
}
 
/* nothing to wait for ? */
if (ring == RADEON_NUM_RINGS) {
return -ENOENT;
}
 
while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
if (time_after(last_activity, timeout)) {
/* the normal case, timeout is somewhere before last_activity */
timeout = last_activity - timeout;
} else {
/* either jiffies wrapped around, or no fence was signaled in the last 500ms
* anyway we will just wait for the minimum amount and then check for a lockup
*/
timeout = 1;
}
 
// trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (target_seq[i]) {
radeon_irq_kms_sw_irq_get(rdev, i);
}
}
if (intr) {
r = wait_event_interruptible_timeout(rdev->fence_queue,
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
timeout);
} else {
r = wait_event_timeout(rdev->fence_queue,
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
timeout);
}
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (target_seq[i]) {
radeon_irq_kms_sw_irq_put(rdev, i);
}
}
if (unlikely(r < 0)) {
return r;
}
// trace_radeon_fence_wait_end(rdev->ddev, seq);
 
if (unlikely(!signaled)) {
/* we were interrupted for some reason and fence
* isn't signaled yet, resume waiting */
if (r) {
continue;
}
 
mutex_lock(&rdev->ring_lock);
for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
tmp = rdev->fence_drv[i].last_activity;
}
}
/* test if somebody else has already decided that this is a lockup */
if (last_activity != tmp) {
last_activity = tmp;
mutex_unlock(&rdev->ring_lock);
continue;
}
 
if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
/* good news we believe it's a lockup */
dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
target_seq[ring]);
 
/* change last activity so nobody else thinks there is a lockup */
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
rdev->fence_drv[i].last_activity = GetTimerTicks();
}
 
/* mark the ring as not ready any more */
rdev->ring[ring].ready = false;
mutex_unlock(&rdev->ring_lock);
return -EDEADLK;
}
mutex_unlock(&rdev->ring_lock);
}
}
return 0;
}
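
The wait loop above clamps its sleep against wrap-prone tick counters before checking for a lockup. A minimal standalone sketch of that window computation, with illustrative constants standing in for GetTimerTicks() values and RADEON_FENCE_JIFFIES_TIMEOUT:

#include <stdio.h>
#include <stdint.h>

#define FENCE_TIMEOUT 50u  /* ticks; stand-in for RADEON_FENCE_JIFFIES_TIMEOUT */

/* wrap-safe "a is later than b", same idea as the kernel's time_after() */
static int ticks_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

static uint32_t wait_window(uint32_t now, uint32_t last_activity)
{
	uint32_t timeout = now - FENCE_TIMEOUT;

	if (ticks_after(last_activity, timeout))
		return last_activity - timeout; /* normal case */
	return 1; /* counter wrapped or nothing signaled lately: poll minimally */
}

int main(void)
{
	printf("%u\n", wait_window(1000, 990)); /* 40: activity 10 ticks ago */
	printf("%u\n", wait_window(1000, 100)); /* 1: stale, check for lockup */
	return 0;
}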
 
433,7 → 556,7
bool intr)
{
uint64_t seq[RADEON_NUM_RINGS];
unsigned i, num_rings = 0;
unsigned i;
int r;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
443,19 → 566,15
continue;
}
 
		seq[i] = fences[i]->seq;
		++num_rings;

		/* test if something was already signaled */
		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
			return 0;
	}

	/* nothing to wait for ? */
	if (num_rings == 0)
		return -ENOENT;

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		seq[i] = fences[i]->seq;
	}
 
r = radeon_fence_wait_seq(rdev, seq, intr);
r = radeon_fence_wait_any_seq(rdev, seq, intr);
if (r) {
return r;
}
463,7 → 582,7
}
 
/**
* radeon_fence_wait_next - wait for the next fence to signal
* radeon_fence_wait_next_locked - wait for the next fence to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
472,21 → 591,21
* Returns 0 if the next fence has passed, error for all other cases.
* Caller must hold ring lock.
*/
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
uint64_t seq;
 
seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is
already the last emitted fence */
return -ENOENT;
}
return radeon_fence_wait_seq(rdev, seq, false);
return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
 
/**
* radeon_fence_wait_empty - wait for all fences to signal
* radeon_fence_wait_empty_locked - wait for all fences to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
495,20 → 614,16
* Returns 0 if the fences have passed, error for all other cases.
* Caller must hold ring lock.
*/
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
int r;
 
seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
if (!seq[ring])
return 0;
 
r = radeon_fence_wait_seq(rdev, seq, false);
r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
if (r) {
if (r == -EDEADLK)
if (r == -EDEADLK) {
return -EDEADLK;
 
}
dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
ring, r);
}
710,6 → 825,7
for (i = 0; i < RADEON_NUM_RINGS; ++i)
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
rdev->fence_drv[ring].last_activity = GetTimerTicks();
rdev->fence_drv[ring].initialized = false;
}
 
755,7 → 871,7
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
r = radeon_fence_wait_empty(rdev, ring);
r = radeon_fence_wait_empty_locked(rdev, ring);
if (r) {
/* no need to trigger GPU reset as we are unloading */
radeon_fence_driver_force_completion(rdev);
802,8 → 918,6
if (!rdev->fence_drv[i].initialized)
continue;
 
radeon_fence_process(rdev, i);
 
seq_printf(m, "--- ring %d ---\n", i);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
819,28 → 933,8
return 0;
}
 
/**
* radeon_debugfs_gpu_reset - manually trigger a gpu reset
*
* Manually trigger a gpu reset at the next fence wait.
*/
static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
 
down_read(&rdev->exclusive_lock);
seq_printf(m, "%d\n", rdev->needs_reset);
rdev->needs_reset = true;
up_read(&rdev->exclusive_lock);
 
return 0;
}
 
static struct drm_info_list radeon_debugfs_fence_list[] = {
{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif
 
847,7 → 941,7
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
return 0;
#endif
/drivers/video/drm/radeon/radeon_gart.c
28,6 → 28,7
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"
 
 
static inline void *
137,7 → 138,7
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
0, NULL, &rdev->gart.robj);
NULL, &rdev->gart.robj);
if (r) {
return r;
}
215,6 → 216,7
if (rdev->gart.robj == NULL) {
return;
}
radeon_gart_table_vram_unpin(rdev);
radeon_bo_unref(&rdev->gart.robj);
}
 
252,8 → 254,7
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
if (rdev->gart.ptr) {
radeon_gart_set_page(rdev, t, page_base,
RADEON_GART_PAGE_DUMMY);
radeon_gart_set_page(rdev, t, page_base);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
271,7 → 272,6
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
* @flags: RADEON_GART_PAGE_* flags
*
* Binds the requested pages to the gart page table
* (all asics).
278,8 → 278,7
* Returns 0 for success, -EINVAL for failure.
*/
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
int pages, u32 *pagelist, dma_addr_t *dma_addr)
{
unsigned t;
unsigned p;
286,6 → 285,8
uint64_t page_base;
int i, j;
 
// dbgprintf("offset %x pages %d list %x\n",
// offset, pages, pagelist);
if (!rdev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
294,12 → 295,12
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
for (i = 0; i < pages; i++, p++) {
rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
rdev->gart.pages[p] = pagelist[i];
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base, flags);
radeon_gart_set_page(rdev, t, page_base);
page_base += RADEON_GPU_PAGE_SIZE;
}
}
310,6 → 311,33
}
 
/**
* radeon_gart_restore - bind all pages in the gart page table
*
* @rdev: radeon_device pointer
*
* Binds all pages in the gart page table (all asics).
* Used to rebuild the gart table on device startup or resume.
*/
void radeon_gart_restore(struct radeon_device *rdev)
{
int i, j, t;
u64 page_base;
 
if (!rdev->gart.ptr) {
return;
}
for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
page_base = rdev->gart.pages_addr[i];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base);
page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
radeon_gart_tlb_flush(rdev);
}
 
/**
* radeon_gart_init - init the driver info for managing the gart
*
* @rdev: radeon_device pointer
374,6 → 402,894
vfree(rdev->gart.pages_addr);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
}
 
radeon_dummy_page_fini(rdev);
/*
* GPUVM
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
* for the entire GPU, there are multiple VM page tables active
* at any given time. The VM page tables can contain a mix of
* vram pages and system memory pages, and the system memory pages
* can be mapped as snooped (cached system pages) or unsnooped
* (uncached system pages).
* Each VM has an ID associated with it and there is a page table
* associated with each VMID. When executing a command buffer,
* the kernel tells the ring what VMID to use for that command
* buffer. VMIDs are allocated dynamically as commands are submitted.
* The userspace drivers maintain their own address space and the kernel
* sets up their page tables accordingly when they submit their
* command buffers and a VMID is assigned.
* Cayman/Trinity support up to 8 active VMs at any given time;
* SI supports 16.
*/
 
/*
* vm helpers
*
* TODO bind a default page at vm initialization for default address
*/
 
/**
* radeon_vm_num_pde - return the number of page directory entries
*
* @rdev: radeon_device pointer
*
* Calculate the number of page directory entries (cayman+).
*/
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}
 
/**
* radeon_vm_directory_size - returns the size of the page directory in bytes
*
* @rdev: radeon_device pointer
*
* Calculate the size of the page directory in bytes (cayman+).
*/
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
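
A worked instance of the two helpers above; RADEON_VM_BLOCK_SIZE = 9 (512 PTEs per page table) and a 2^20-page address space are assumed purely for illustration, not read from a real asic:

#include <stdio.h>

#define GPU_PAGE_SIZE 4096u
#define VM_BLOCK_SIZE 9u /* assumed: log2 of PTEs per page table */
#define GPU_PAGE_ALIGN(x) (((x) + GPU_PAGE_SIZE - 1) & ~(GPU_PAGE_SIZE - 1))

int main(void)
{
	unsigned max_pfn = 1u << 20;                      /* 4 GiB of GPU pages */
	unsigned num_pdes = max_pfn >> VM_BLOCK_SIZE;     /* 2048 directory entries */
	unsigned dir_size = GPU_PAGE_ALIGN(num_pdes * 8); /* 16384 bytes */

	printf("pdes=%u dir=%u bytes\n", num_pdes, dir_size);
	return 0;
}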
 
/**
* radeon_vm_manager_init - init the vm manager
*
* @rdev: radeon_device pointer
*
* Init the vm manager (cayman+).
* Returns 0 for success, error for failure.
*/
int radeon_vm_manager_init(struct radeon_device *rdev)
{
struct radeon_vm *vm;
struct radeon_bo_va *bo_va;
int r;
unsigned size;
 
if (!rdev->vm_manager.enabled) {
/* allocate enough for 2 full VM pts */
size = radeon_vm_directory_size(rdev);
size += rdev->vm_manager.max_pfn * 8;
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
RADEON_GPU_PAGE_ALIGN(size),
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
(rdev->vm_manager.max_pfn * 8) >> 10);
return r;
}
 
r = radeon_asic_vm_init(rdev);
if (r)
return r;
 
rdev->vm_manager.enabled = true;
 
r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
if (r)
return r;
}
 
/* restore page table */
list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
if (vm->page_directory == NULL)
continue;
 
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
}
}
return 0;
}
 
/**
* radeon_vm_free_pt - free the page table for a specific vm
*
* @rdev: radeon_device pointer
* @vm: vm to unbind
*
* Free the page table of a specific vm (cayman+).
*
* Global and local mutex must be locked!
*/
static void radeon_vm_free_pt(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va;
int i;
 
if (!vm->page_directory)
return;
 
list_del_init(&vm->list);
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
 
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
}
 
if (vm->page_tables == NULL)
return;
 
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
 
kfree(vm->page_tables);
}
 
/**
* radeon_vm_manager_fini - tear down the vm manager
*
* @rdev: radeon_device pointer
*
* Tear down the VM manager (cayman+).
*/
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
struct radeon_vm *vm, *tmp;
int i;
 
if (!rdev->vm_manager.enabled)
return;
 
mutex_lock(&rdev->vm_manager.lock);
/* free all allocated page tables */
list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
mutex_lock(&vm->mutex);
radeon_vm_free_pt(rdev, vm);
mutex_unlock(&vm->mutex);
}
for (i = 0; i < RADEON_NUM_VM; ++i) {
radeon_fence_unref(&rdev->vm_manager.active[i]);
}
radeon_asic_vm_fini(rdev);
mutex_unlock(&rdev->vm_manager.lock);
 
radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
rdev->vm_manager.enabled = false;
}
 
/**
* radeon_vm_evict - evict page table to make room for new one
*
* @rdev: radeon_device pointer
* @vm: VM we want to allocate something for
*
* Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
* Returns 0 for success, -ENOMEM for failure.
*
* Global and local mutex must be locked!
*/
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_vm *vm_evict;
 
if (list_empty(&rdev->vm_manager.lru_vm))
return -ENOMEM;
 
vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
struct radeon_vm, list);
if (vm_evict == vm)
return -ENOMEM;
 
mutex_lock(&vm_evict->mutex);
radeon_vm_free_pt(rdev, vm_evict);
mutex_unlock(&vm_evict->mutex);
return 0;
}
 
/**
* radeon_vm_alloc_pt - allocates a page table for a VM
*
* @rdev: radeon_device pointer
* @vm: vm to bind
*
* Allocate a page table for the requested vm (cayman+).
* Returns 0 for success, error for failure.
*
* Global and local mutex must be locked!
*/
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
unsigned pd_size, pts_size;
u64 *pd_addr;
int r;
 
if (vm == NULL) {
return -EINVAL;
}
 
if (vm->page_directory != NULL) {
return 0;
}
 
retry:
pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_directory, pd_size,
RADEON_GPU_PAGE_SIZE, false);
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
if (r)
return r;
goto retry;
 
} else if (r) {
return r;
}
 
vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
 
/* Initially clear the page directory */
pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
memset(pd_addr, 0, pd_size);
 
pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
 
if (vm->page_tables == NULL) {
DRM_ERROR("Cannot allocate memory for page table array\n");
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
return -ENOMEM;
}
 
return 0;
}
 
/**
* radeon_vm_add_to_lru - add VMs page table to LRU list
*
* @rdev: radeon_device pointer
* @vm: vm to add to LRU
*
* Add the allocated page table to the LRU list (cayman+).
*
* Global mutex must be locked!
*/
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
list_del_init(&vm->list);
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
}
 
/**
* radeon_vm_grab_id - allocate the next free VMID
*
* @rdev: radeon_device pointer
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
*
* Allocate an id for the vm (cayman+).
* Returns the fence we need to sync to (if any).
*
* Global and local mutex must be locked!
*/
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring)
{
struct radeon_fence *best[RADEON_NUM_RINGS] = {};
unsigned choices[2] = {};
unsigned i;
 
/* check if the id is still valid */
if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
return NULL;
 
/* we definitely need to flush */
radeon_fence_unref(&vm->last_flush);
 
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < rdev->vm_manager.nvm; ++i) {
struct radeon_fence *fence = rdev->vm_manager.active[i];
 
if (fence == NULL) {
/* found a free one */
vm->id = i;
return NULL;
}
 
if (radeon_fence_is_earlier(fence, best[fence->ring])) {
best[fence->ring] = fence;
choices[fence->ring == ring ? 0 : 1] = i;
}
}
 
for (i = 0; i < 2; ++i) {
if (choices[i]) {
vm->id = choices[i];
return rdev->vm_manager.active[choices[i]];
}
}
 
/* should never happen */
BUG();
return NULL;
}
 
/**
* radeon_vm_fence - remember fence for vm
*
* @rdev: radeon_device pointer
* @vm: vm we want to fence
* @fence: fence to remember
*
* Fence the vm (cayman+).
* Set the fence used to protect page table and id.
*
* Global and local mutex must be locked!
*/
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence)
{
radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
 
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(fence);
}
 
/**
* radeon_vm_bo_find - find the bo_va for a specific vm & bo
*
* @vm: requested vm
* @bo: requested buffer object
*
* Find @bo inside the requested vm (cayman+).
* Search inside the @bo's vm list for the requested vm
* Returns the found bo_va or NULL if none is found
*
* Object has to be reserved!
*/
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
 
list_for_each_entry(bo_va, &bo->va, bo_list) {
if (bo_va->vm == vm) {
return bo_va;
}
}
return NULL;
}
 
/**
* radeon_vm_bo_add - add a bo to a specific vm
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @bo: radeon buffer object
*
* Add @bo into the requested vm (cayman+).
* Add @bo to the list of bos associated with the vm
* Returns newly added bo_va or NULL for failure
*
* Object has to be reserved!
*/
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
 
bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (bo_va == NULL) {
return NULL;
}
bo_va->vm = vm;
bo_va->bo = bo;
bo_va->soffset = 0;
bo_va->eoffset = 0;
bo_va->flags = 0;
bo_va->valid = false;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
 
mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
list_add_tail(&bo_va->bo_list, &bo->va);
mutex_unlock(&vm->mutex);
 
return bo_va;
}
 
/**
* radeon_vm_bo_set_addr - set bo's virtual address inside a vm
*
* @rdev: radeon_device pointer
* @bo_va: bo_va to store the address
* @soffset: requested offset of the buffer in the VM address space
* @flags: attributes of pages (read/write/valid/etc.)
*
* Set offset of @bo_va (cayman+).
* Validate and set the offset requested within the vm address space.
* Returns 0 for success, error for failure.
*
* Object has to be reserved!
*/
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t soffset,
uint32_t flags)
{
uint64_t size = radeon_bo_size(bo_va->bo);
uint64_t eoffset, last_offset = 0;
struct radeon_vm *vm = bo_va->vm;
struct radeon_bo_va *tmp;
struct list_head *head;
unsigned last_pfn;
 
if (soffset) {
/* make sure object fits at this offset */
eoffset = soffset + size;
if (soffset >= eoffset) {
return -EINVAL;
}
 
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
if (last_pfn > rdev->vm_manager.max_pfn) {
dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
return -EINVAL;
}
 
} else {
eoffset = last_pfn = 0;
}
 
mutex_lock(&vm->mutex);
head = &vm->va;
last_offset = 0;
list_for_each_entry(tmp, &vm->va, vm_list) {
if (bo_va == tmp) {
/* skip over currently modified bo */
continue;
}
 
if (soffset >= last_offset && eoffset <= tmp->soffset) {
/* bo can be added before this one */
break;
}
if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
/* bo and tmp overlap, invalid offset */
dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
mutex_unlock(&vm->mutex);
return -EINVAL;
}
last_offset = tmp->eoffset;
head = &tmp->vm_list;
}
 
bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
bo_va->valid = false;
list_move(&bo_va->vm_list, head);
 
mutex_unlock(&vm->mutex);
return 0;
}
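
The conflict check above reduces to an intersection test between the requested half-open range [soffset, eoffset) and each existing bo_va. The same test in isolation, with toy offsets:

#include <stdio.h>
#include <stdint.h>

/* mirrors the "eoffset > tmp->soffset && soffset < tmp->eoffset" test */
static int ranges_overlap(uint64_t s1, uint64_t e1, uint64_t s2, uint64_t e2)
{
	return e1 > s2 && s1 < e2;
}

int main(void)
{
	printf("%d\n", ranges_overlap(0x1000, 0x3000, 0x2000, 0x4000)); /* 1: conflict */
	printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x2000, 0x3000)); /* 0: adjacent ok */
	return 0;
}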
 
/**
* radeon_vm_map_gart - get the physical address of a gart page
*
* @rdev: radeon_device pointer
* @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
* to (cayman+).
* Returns the physical address of the page.
*/
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
uint64_t result;
 
/* page table offset */
result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
 
/* in case cpu page size != gpu page size*/
result |= addr & (~PAGE_MASK);
 
return result;
}
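
The lookup above splits the address into an array index (high bits) and a byte offset within the page (low bits). The same math with a toy page table:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK (~((1ull << PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t pages_addr[] = { 0xAA000, 0xBB000 }; /* fake DMA addresses */
	uint64_t addr = (1ull << PAGE_SHIFT) + 0x123; /* page 1, offset 0x123 */
	uint64_t result = pages_addr[addr >> PAGE_SHIFT] | (addr & ~PAGE_MASK);

	printf("0x%llx\n", (unsigned long long)result); /* 0xbb123 */
	return 0;
}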
 
/**
* radeon_vm_update_pdes - make sure that page directory is valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
*
* Allocates new page tables if necessary
* and updates the page directory (cayman+).
* Returns 0 for success, error for failure.
*
* Global and local mutex must be locked!
*/
static int radeon_vm_update_pdes(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_ib *ib,
uint64_t start, uint64_t end)
{
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
 
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0;
uint64_t pt_idx;
int r;
 
start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
 
/* walk over the address space and update the page directory */
for (pt_idx = start; pt_idx <= end; ++pt_idx) {
uint64_t pde, pt;
 
if (vm->page_tables[pt_idx])
continue;
 
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_tables[pt_idx],
RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, false);
 
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
if (r)
return r;
goto retry;
} else if (r) {
return r;
}
 
pde = vm->pd_gpu_addr + pt_idx * 8;
 
pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
 
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt)) {
 
if (count) {
radeon_asic_vm_set_page(rdev, ib, last_pde,
last_pt, count, incr,
RADEON_VM_PAGE_VALID);
}
 
count = 1;
last_pde = pde;
last_pt = pt;
} else {
++count;
}
}
 
if (count) {
radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
incr, RADEON_VM_PAGE_VALID);
 
}
 
return 0;
}
 
/**
* radeon_vm_update_ptes - make sure that page tables are valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to
* @flags: mapping flags
*
* Update the page tables in the range @start - @end (cayman+).
*
* Global and local mutex must be locked!
*/
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_ib *ib,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
 
uint64_t last_pte = ~0, last_dst = ~0;
unsigned count = 0;
uint64_t addr;
 
start = start / RADEON_GPU_PAGE_SIZE;
end = end / RADEON_GPU_PAGE_SIZE;
 
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
unsigned nptes;
uint64_t pte;
 
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = RADEON_VM_PTE_COUNT - (addr & mask);
 
pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
pte += (addr & mask) * 8;
 
if ((last_pte + 8 * count) != pte) {
 
if (count) {
radeon_asic_vm_set_page(rdev, ib, last_pte,
last_dst, count,
RADEON_GPU_PAGE_SIZE,
flags);
}
 
count = nptes;
last_pte = pte;
last_dst = dst;
} else {
count += nptes;
}
 
addr += nptes;
dst += nptes * RADEON_GPU_PAGE_SIZE;
}
 
if (count) {
radeon_asic_vm_set_page(rdev, ib, last_pte,
last_dst, count,
RADEON_GPU_PAGE_SIZE, flags);
}
}
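
Both update helpers above batch contiguous runs: a new set_page command is emitted only when the next PTE slot or destination breaks the current run. A standalone sketch of that coalescing idea (toy addresses, printf standing in for radeon_asic_vm_set_page):

#include <stdio.h>
#include <stdint.h>

static void flush(uint64_t pte, uint64_t dst, unsigned count)
{
	if (count)
		printf("set_page(pte=0x%llx, dst=0x%llx, count=%u)\n",
		       (unsigned long long)pte, (unsigned long long)dst, count);
}

int main(void)
{
	/* PTE slot addresses for three pages; the third is discontiguous */
	uint64_t ptes[] = { 0x1000, 0x1008, 0x2000 };
	uint64_t dst = 0x80000000ull;
	uint64_t last_pte = ~0ull, last_dst = ~0ull;
	unsigned i, count = 0;

	for (i = 0; i < 3; i++, dst += 4096) {
		if (last_pte + 8 * count != ptes[i]) {
			flush(last_pte, last_dst, count); /* run broken: emit it */
			last_pte = ptes[i];
			last_dst = dst;
			count = 1;
		} else {
			count++;
		}
	}
	flush(last_pte, last_dst, count); /* emit the final run */
	return 0;
}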
 
/**
* radeon_vm_bo_update_pte - map a bo into the vm page table
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @bo: radeon buffer object
* @mem: ttm mem
*
* Fill in the page table entries for @bo (cayman+).
* Returns 0 for success, -EINVAL for failure.
*
* Object has to be reserved & global and local mutex must be locked!
*/
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct ttm_mem_reg *mem)
{
unsigned ridx = rdev->asic->vm.pt_ring_index;
struct radeon_ib ib;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
uint64_t addr;
int r;
 
/* nothing to do if vm isn't bound */
if (vm->page_directory == NULL)
return 0;
 
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
 
if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
bo, vm);
return -EINVAL;
}
 
if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
return 0;
 
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
bo_va->valid = true;
}
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
} else {
addr += rdev->vm_manager.vram_base_offset;
}
} else {
addr = 0;
bo_va->valid = false;
}
 
nptes = radeon_bo_ngpu_pages(bo);
 
/* assume two extra pdes in case the mapping overlaps the borders */
npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
 
/* padding, etc. */
ndw = 64;
 
if (RADEON_VM_BLOCK_SIZE > 11)
/* reserve space for one header for every 2k dwords */
ndw += (nptes >> 11) * 4;
else
/* reserve space for one header for
every (1 << BLOCK_SIZE) entries */
ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
 
/* reserve space for pte addresses */
ndw += nptes * 2;
 
/* reserve space for one header for every 2k dwords */
ndw += (npdes >> 11) * 4;
 
/* reserve space for pde addresses */
ndw += npdes * 2;
 
/* update too big for an IB */
if (ndw > 0xfffff)
return -ENOMEM;
 
r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
if (r)
	return r;
ib.length_dw = 0;
 
r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
}
 
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, bo_va->flags);
 
radeon_ib_sync_to(&ib, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
}
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(ib.fence);
radeon_ib_free(rdev, &ib);
radeon_fence_unref(&vm->last_flush);
 
return 0;
}
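
A worked instance of the IB sizing arithmetic above, assuming RADEON_VM_BLOCK_SIZE = 9 and a 16 MiB mapping; the numbers are illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned block_size = 9;
	unsigned nptes = 4096;                      /* 16 MiB / 4 KiB pages */
	unsigned npdes = (nptes >> block_size) + 2; /* +2 for border overlap */
	unsigned ndw = 64;                          /* padding, etc. */

	ndw += (nptes >> block_size) * 4;           /* PTE write headers */
	ndw += nptes * 2;                           /* PTE addresses */
	ndw += (npdes >> 11) * 4;                   /* PDE write headers */
	ndw += npdes * 2;                           /* PDE addresses */

	printf("ndw = %u dwords (%u bytes)\n", ndw, ndw * 4); /* 8308, 33232 */
	return 0;
}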
 
/**
* radeon_vm_bo_rmv - remove a bo from a specific vm
*
* @rdev: radeon_device pointer
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
* Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
* remove the ptes for @bo_va in the page table.
* Returns 0 for success.
*
* Object has to be reserved!
*/
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
int r = 0;
 
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&bo_va->vm->mutex);
if (bo_va->soffset) {
r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
}
mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
 
kfree(bo_va);
return r;
}
 
/**
* radeon_vm_bo_invalidate - mark the bo as invalid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @bo: radeon buffer object
*
* Mark @bo as invalid (cayman+).
*/
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
 
list_for_each_entry(bo_va, &bo->va, bo_list) {
bo_va->valid = false;
}
}
 
/**
* radeon_vm_init - initialize a vm instance
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Init @vm fields (cayman+).
*/
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
vm->id = 0;
vm->fence = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
}
 
/**
* radeon_vm_fini - tear down a vm instance
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Tear down @vm (cayman+).
* Unbind the VM and remove all bos from the vm bo list
*/
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va, *tmp;
int r;
 
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
radeon_vm_free_pt(rdev, vm);
mutex_unlock(&rdev->vm_manager.lock);
 
if (!list_empty(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
list_del_init(&bo_va->vm_list);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
radeon_bo_unreserve(bo_va->bo);
kfree(bo_va);
}
}
radeon_fence_unref(&vm->fence);
radeon_fence_unref(&vm->last_flush);
mutex_unlock(&vm->mutex);
}
/drivers/video/drm/radeon/radeon_irq_kms.c
41,13 → 41,13
/**
* radeon_driver_irq_handler_kms - irq handler for KMS
*
* @int irq, void *arg: args
* @DRM_IRQ_ARGS: args
*
* This is the irq handler for the radeon KMS driver (all asics).
* radeon_irq_process is a macro that points to the per-asic
* irq handler callback.
*/
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg)
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
struct radeon_device *rdev = dev->dev_private;
73,7 → 73,6
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.dpm_thermal = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
121,7 → 120,6
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.dpm_thermal = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
145,20 → 143,23
*/
int radeon_irq_kms_init(struct radeon_device *rdev)
{
int irq_line;
int r = 0;
 
ENTER();
 
 
spin_lock_init(&rdev->irq.lock);
/* enable msi */
rdev->msi_enabled = 0;
 
rdev->irq.installed = true;
r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
r = drm_irq_install(rdev->ddev);
if (r) {
rdev->irq.installed = false;
FAIL();
return r;
}
 
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
179,6 → 180,7
// if (rdev->msi_enabled)
// pci_disable_msi(rdev->pdev);
}
// flush_work(&rdev->hotplug_work);
}
 
/**
242,9 → 244,6
unsigned long irqflags;
int i;
 
if (!rdev->ddev->irq_enabled)
return;
 
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
265,9 → 264,6
unsigned long irqflags;
int i;
 
if (!rdev->ddev->irq_enabled)
return;
 
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
275,3 → 271,59
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
 
 
static struct drm_driver drm_driver = {
.irq_preinstall = radeon_driver_irq_preinstall_kms,
.irq_postinstall = radeon_driver_irq_postinstall_kms,
.irq_handler = radeon_driver_irq_handler_kms
};
 
static struct drm_driver *driver = &drm_driver;
 
int drm_irq_install(struct drm_device *dev)
{
unsigned long sh_flags = 0;
int irq_line;
int ret = 0;
 
char *irqname;
 
mutex_lock(&dev->struct_mutex);
 
/* Driver must have been initialized */
if (!dev->dev_private) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
 
if (dev->irq_enabled) {
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
 
irq_line = drm_dev_to_irq(dev);
 
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
 
/* Before installing handler */
if (driver->irq_preinstall)
driver->irq_preinstall(dev);
 
ret = AttachIntHandler(irq_line, driver->irq_handler, (u32)dev);
 
/* After installing handler */
if (driver->irq_postinstall)
ret = driver->irq_postinstall(dev);
 
if (ret < 0) {
DRM_ERROR(__FUNCTION__);
}
 
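	/* PCI command register bit 10 is the INTx Disable bit; clear it so
	 * the device can deliver legacy interrupts to the new handler */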
u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
cmd&= ~(1<<10);
PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);
 
return ret;
}
/drivers/video/drm/radeon/radeon_legacy_crtc.c
385,7 → 385,7
 
DRM_DEBUG_KMS("\n");
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
if (!atomic && !crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
395,8 → 395,8
target_fb = fb;
}
else {
radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
target_fb = crtc->primary->fb;
radeon_fb = to_radeon_framebuffer(crtc->fb);
target_fb = crtc->fb;
}
 
switch (target_fb->bits_per_pixel) {
422,7 → 422,6
/* Pin framebuffer & get tilling informations */
obj = radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
retry:
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
431,33 → 430,6
&base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
 
/* On old GPUs like RN50 with little vram, pinning can fail because
* the current fb is taking all the space needed. So instead of
* unpinning the old buffer after pinning the new one, first unpin
* the old one and then retry pinning the new one.
*
* As only the master can set a mode, only the master can pin, and
* it is unlikely the master client will race with itself, especially
* on those old gpus with a single crtc.
*
* We don't shutdown the display controller because new buffer
* will end up in same spot.
*/
if (!atomic && fb && fb != crtc->primary->fb) {
struct radeon_bo *old_rbo;
unsigned long nsize, osize;
 
old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
osize = radeon_bo_size(old_rbo);
nsize = radeon_bo_size(rbo);
if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
radeon_bo_unpin(old_rbo);
radeon_bo_unreserve(old_rbo);
fb = NULL;
goto retry;
}
}
return -EINVAL;
}
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
555,7 → 527,7
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, crtc_offset);
WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
 
if (!atomic && fb && fb != crtc->primary->fb) {
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
599,7 → 571,7
}
}
 
switch (crtc->primary->fb->bits_per_pixel) {
switch (crtc->fb->bits_per_pixel) {
case 8:
format = 2;
break;
1084,26 → 1056,6
}
}
 
static void radeon_crtc_disable(struct drm_crtc *crtc)
{
radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
struct radeon_framebuffer *radeon_fb;
struct radeon_bo *rbo;
 
radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r))
DRM_ERROR("failed to reserve rbo before unpin\n");
else {
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
}
}
 
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.dpms = radeon_crtc_dpms,
.mode_fixup = radeon_crtc_mode_fixup,
1113,7 → 1065,6
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
.load_lut = radeon_crtc_load_lut,
.disable = radeon_crtc_disable
};
 
 
/drivers/video/drm/radeon/radeon_legacy_encoders.c
392,7 → 392,7
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
bd = backlight_device_register(bl_name, drm_connector->kdev,
bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
/drivers/video/drm/radeon/radeon_mode.h
46,10 → 46,6
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
 
#define RADEON_MAX_HPD_PINS 7
#define RADEON_MAX_CRTCS 6
#define RADEON_MAX_AFMT_BLOCKS 7
 
enum radeon_rmx_type {
RMX_OFF,
RMX_FULL,
191,11 → 187,11
struct radeon_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
union {
struct i2c_algo_bit_data bit;
struct i2c_algo_dp_aux_data dp;
} algo;
struct radeon_i2c_bus_rec rec;
struct drm_dp_aux aux;
bool has_aux;
struct mutex mutex;
};
 
/* mostly for macs, but really any system without connector tables */
229,7 → 225,6
int offset;
bool last_buffer_filled_status;
int id;
struct r600_audio_pin *pin;
};
 
struct radeon_mode_info {
237,8 → 232,8
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[RADEON_MAX_CRTCS];
struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS];
struct radeon_crtc *crtcs[6];
struct radeon_afmt *afmt[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
251,10 → 246,6
struct drm_property *underscan_property;
struct drm_property *underscan_hborder_property;
struct drm_property *underscan_vborder_property;
/* audio */
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
295,7 → 286,6
 
struct radeon_atom_ss {
uint16_t percentage;
uint16_t percentage_divider;
uint8_t type;
uint16_t step;
uint8_t delay;
306,12 → 296,6
uint16_t amount;
};
 
enum radeon_flip_status {
RADEON_FLIP_NONE,
RADEON_FLIP_PENDING,
RADEON_FLIP_SUBMITTED
};
 
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
323,8 → 307,6
uint64_t cursor_addr;
int cursor_width;
int cursor_height;
int max_cursor_width;
int max_cursor_height;
uint32_t legacy_display_base_addr;
uint32_t legacy_cursor_offset;
enum radeon_rmx_type rmx_type;
334,10 → 316,7
fixed20_12 hsc;
struct drm_display_mode native_mode;
int pll_id;
/* page flipping */
struct workqueue_struct *flip_queue;
struct radeon_flip_work *flip_work;
enum radeon_flip_status flip_status;
int deferred_flip_completion;
/* pll sharing */
struct radeon_atom_ss ss;
bool ss_enabled;
348,11 → 327,6
u32 pll_flags;
struct drm_encoder *encoder;
struct drm_connector *connector;
/* for dpm */
u32 line_time;
u32 wm_low;
u32 wm_high;
struct drm_display_mode hw_mode;
};
 
struct radeon_encoder_primary_dac {
450,6 → 424,7
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 dp_sink_type;
int dp_clock;
486,17 → 461,6
u8 cd_mux_state;
};
 
enum radeon_connector_audio {
RADEON_AUDIO_DISABLE = 0,
RADEON_AUDIO_ENABLE = 1,
RADEON_AUDIO_AUTO = 2
};
 
enum radeon_connector_dither {
RADEON_FMT_DITHER_DISABLE = 0,
RADEON_FMT_DITHER_ENABLE = 1,
};
 
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
515,9 → 479,6
struct radeon_hpd hpd;
struct radeon_router router;
struct radeon_i2c_chan *router_bus;
enum radeon_connector_audio audio;
enum radeon_connector_dither dither;
int pixelclock_for_modeset;
};
 
struct radeon_framebuffer {
549,133 → 510,13
bool enable_dithen;
u32 vco_mode;
u32 real_clock;
/* added for CI */
u32 post_divider;
u32 flags;
};
 
struct atom_mpll_param {
union {
struct {
#ifdef __BIG_ENDIAN
u32 reserved : 8;
u32 clkfrac : 12;
u32 clkf : 12;
#else
u32 clkf : 12;
u32 clkfrac : 12;
u32 reserved : 8;
#endif
};
u32 fb_div;
};
u32 post_div;
u32 bwcntl;
u32 dll_speed;
u32 vco_mode;
u32 yclk_sel;
u32 qdr;
u32 half_rate;
};
 
#define MEM_TYPE_GDDR5 0x50
#define MEM_TYPE_GDDR4 0x40
#define MEM_TYPE_GDDR3 0x30
#define MEM_TYPE_DDR2 0x20
#define MEM_TYPE_GDDR1 0x10
#define MEM_TYPE_DDR3 0xb0
#define MEM_TYPE_MASK 0xf0
 
struct atom_memory_info {
u8 mem_vendor;
u8 mem_type;
};
 
#define MAX_AC_TIMING_ENTRIES 16
 
struct atom_memory_clock_range_table
{
u8 num_entries;
u8 rsv[3];
u32 mclk[MAX_AC_TIMING_ENTRIES];
};
 
#define VBIOS_MC_REGISTER_ARRAY_SIZE 32
#define VBIOS_MAX_AC_TIMING_ENTRIES 20
 
struct atom_mc_reg_entry {
u32 mclk_max;
u32 mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE];
};
 
struct atom_mc_register_address {
u16 s1;
u8 pre_reg_data;
};
 
struct atom_mc_reg_table {
u8 last;
u8 num_entries;
struct atom_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES];
struct atom_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE];
};
 
#define MAX_VOLTAGE_ENTRIES 32
 
struct atom_voltage_table_entry
{
u16 value;
u32 smio_low;
};
 
struct atom_voltage_table
{
u32 count;
u32 mask_low;
u32 phase_delay;
struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
};
 
 
extern void
radeon_add_atom_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
uint32_t igp_lane_info,
uint16_t connector_object_id,
struct radeon_hpd *hpd,
struct radeon_router *router);
extern void
radeon_add_legacy_connector(struct drm_device *dev,
uint32_t connector_id,
uint32_t supported_device,
int connector_type,
struct radeon_i2c_bus_rec *i2c_bus,
uint16_t connector_object_id,
struct radeon_hpd *hpd);
extern uint32_t
radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
uint8_t dac);
extern void radeon_link_encoder_connector(struct drm_device *dev);
 
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
radeon_atombios_get_tv_info(struct radeon_device *rdev);
extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
u16 *vddc, u16 *vddci, u16 *mvdd);
 
extern void
radeon_combios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
extern void
radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
struct drm_encoder *encoder,
bool connected);
 
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
extern struct drm_connector *
685,11 → 526,10
 
extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
extern int radeon_get_monitor_bpc(struct drm_connector *connector);
 
extern struct edid *radeon_connector_edid(struct drm_connector *connector);
 
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
702,9 → 542,6
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
713,7 → 550,8
uint8_t lane_set);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);
 
extern void radeon_i2c_init(struct radeon_device *rdev);
extern void radeon_i2c_fini(struct radeon_device *rdev);
724,6 → 562,9
const char *name);
extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
struct radeon_i2c_bus_rec *i2c_bus);
extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
739,6 → 580,7
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
 
810,9 → 652,7
int x, int y);
 
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
unsigned int flags,
int *vpos, int *hpos, void *stime,
void *etime);
int *vpos, int *hpos);
 
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
904,12 → 744,6
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
/* fmt blocks */
void avivo_program_fmt(struct drm_encoder *encoder);
void dce3_program_fmt(struct drm_encoder *encoder);
void dce4_program_fmt(struct drm_encoder *encoder);
void dce8_program_fmt(struct drm_encoder *encoder);
 
/* fbdev layer */
int radeon_fbdev_init(struct radeon_device *rdev);
void radeon_fbdev_fini(struct radeon_device *rdev);
919,7 → 753,6
 
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
 
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
 
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
/drivers/video/drm/radeon/radeon_object.h
31,6 → 31,8
#include <drm/radeon_drm.h>
#include "radeon.h"
 
struct sg_table;
 
/**
* radeon_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
52,31 → 54,11
return 0;
}
 
/**
* radeon_bo_reserve - reserve bo
* @bo: bo structure
* @no_intr: don't return -ERESTARTSYS on pending signal
*
* Returns:
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
 
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
ttm_bo_unreserve(&bo->tbo);
// ttm_bo_unreserve(&bo->tbo);
}
 
/**
98,6 → 80,15
return bo->tbo.num_pages << PAGE_SHIFT;
}
 
static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
{
#ifdef __TTM__
return ttm_bo_is_reserved(&bo->tbo);
#else
return !!atomic_read(&bo->tbo.reserved);
#endif
}
 
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
{
return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
113,10 → 104,13
* @bo: radeon object for which we query the offset
*
* Returns mmap offset of the object.
*
* Note: addr_space_offset is constant after ttm bo init thus isn't protected
* by any lock.
*/
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
return drm_vma_node_offset_addr(&bo->tbo.vma_node);
return bo->tbo.addr_space_offset;
}
 
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
124,12 → 118,11
 
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u32 flags,
bool kernel, u32 domain,
struct sg_table *sg,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
139,9 → 132,9
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern int radeon_bo_list_validate(struct radeon_device *rdev,
struct ww_acquire_ctx *ticket,
struct list_head *head, int ring);
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
extern int radeon_bo_list_validate(struct list_head *head, int ring);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
151,7 → 144,7
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
struct ttm_mem_reg *mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 
171,8 → 164,7
 
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 align, u32 domain,
u32 flags);
unsigned size, u32 domain);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
182,7 → 174,7
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
unsigned size, unsigned align);
unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
struct radeon_sa_bo **sa_bo,
struct radeon_fence *fence);
/drivers/video/drm/radeon/radeon_pm.c
65,18 → 65,7
 
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
mutex_lock(&rdev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
rdev->pm.dpm.ac_power = true;
else
rdev->pm.dpm.ac_power = false;
if (rdev->family == CHIP_ARUBA) {
if (rdev->asic->dpm.enable_bapm)
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
}
mutex_unlock(&rdev->pm.mutex);
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
145,11 → 134,7
if (list_empty(&rdev->gem.objects))
return;
 
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
ttm_bo_unmap_virtual(&bo->tbo);
}
}
 
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
260,7 → 245,7
if (!ring->ready) {
continue;
}
r = radeon_fence_wait_empty(rdev, i);
r = radeon_fence_wait_empty_locked(rdev, i);
if (r) {
/* needs a GPU reset dont reset here */
mutex_unlock(&rdev->ring_lock);
342,7 → 327,7
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int cp = rdev->pm.profile;
 
358,14 → 343,9
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
 
/* Can't set profile when the card is off */
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
 
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (strncmp("default", buf, strlen("default")) == 0)
397,13 → 377,12
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int pm = rdev->pm.pm_method;
 
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == PM_METHOD_DYNPM) ? "dynpm" :
(pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}
 
static ssize_t radeon_set_pm_method(struct device *dev,
411,22 → 390,10
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
 
/* Can't set method when the card is off */
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
count = -EINVAL;
goto fail;
}
 
/* we don't support the legacy modes with dpm */
if (rdev->pm.pm_method == PM_METHOD_DPM) {
count = -EINVAL;
goto fail;
}
 
if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
mutex_lock(&rdev->pm.mutex);
rdev->pm.pm_method = PM_METHOD_DYNPM;
450,542 → 417,65
return count;
}
 
static ssize_t radeon_get_dpm_state(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct radeon_device *rdev = ddev->dev_private;
	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

//static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
//static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
 
static ssize_t radeon_set_dpm_state(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
 
mutex_lock(&rdev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
else if (strncmp("performance", buf, strlen("performance")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
else {
mutex_unlock(&rdev->pm.mutex);
count = -EINVAL;
goto fail;
}
mutex_unlock(&rdev->pm.mutex);
 
/* Can't set dpm state when the card is off */
if (!(rdev->flags & RADEON_IS_PX) ||
(ddev->switch_power_state == DRM_SWITCH_POWER_ON))
radeon_pm_compute_clocks(rdev);
 
fail:
return count;
}
 
static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
 
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
 
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}
 
static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level;
int ret = 0;
 
/* Can't force performance level when the card is off */
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
 
mutex_lock(&rdev->pm.mutex);
if (strncmp("low", buf, strlen("low")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_HIGH;
} else if (strncmp("auto", buf, strlen("auto")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_AUTO;
} else {
count = -EINVAL;
goto fail;
}
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active) {
count = -EINVAL;
goto fail;
}
ret = radeon_dpm_force_performance_level(rdev, level);
if (ret)
count = -EINVAL;
}
fail:
mutex_unlock(&rdev->pm.mutex);
 
return count;
}
 
 
static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
struct drm_device *ddev = rdev->ddev;
int temp;
 
/* Can't get temperature when the card is off */
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
 
if (rdev->asic->pm.get_temperature)
temp = radeon_get_temperature(rdev);
else
temp = 0;
 
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
 
static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
// int hyst = to_sensor_dev_attr(attr)->index;
int temp;
 
// if (hyst)
// temp = rdev->pm.dpm.thermal.min_temp;
// else
temp = rdev->pm.dpm.thermal.max_temp;
 
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
 
 
static struct attribute *hwmon_attributes[] = {
// &sensor_dev_attr_temp1_input.dev_attr.attr,
// &sensor_dev_attr_temp1_crit.dev_attr.attr,
// &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
NULL
};
 
 
 
static int radeon_hwmon_init(struct radeon_device *rdev)
{
int err = 0;
 
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
temp = rv6xx_get_temp(rdev);
break;
case THERMAL_TYPE_RV770:
temp = rv770_get_temp(rdev);
break;
case THERMAL_TYPE_EVERGREEN:
case THERMAL_TYPE_NI:
temp = evergreen_get_temp(rdev);
break;
case THERMAL_TYPE_SUMO:
temp = sumo_get_temp(rdev);
break;
case THERMAL_TYPE_SI:
case THERMAL_TYPE_CI:
case THERMAL_TYPE_KV:
if (rdev->asic->pm.get_temperature == NULL)
return err;
 
temp = si_get_temp(rdev);
break;
default:
temp = 0;
break;
}
 
return err;
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
 
static void radeon_hwmon_fini(struct radeon_device *rdev)
static ssize_t radeon_hwmon_show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
{
// if (rdev->pm.int_hwmon_dev)
// hwmon_device_unregister(rdev->pm.int_hwmon_dev);
return sprintf(buf, "radeon\n");
}
 
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
static int radeon_hwmon_init(struct radeon_device *rdev)
{
struct radeon_device *rdev =
container_of(work, struct radeon_device,
pm.dpm.thermal.work);
/* switch to the thermal state */
enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int err = 0;
 
if (!rdev->pm.dpm_enabled)
return;
rdev->pm.int_hwmon_dev = NULL;
 
if (rdev->asic->pm.get_temperature) {
int temp = radeon_get_temperature(rdev);
 
if (temp < rdev->pm.dpm.thermal.min_temp)
/* switch back the user state */
dpm_state = rdev->pm.dpm.user_state;
} else {
if (rdev->pm.dpm.thermal.high_to_low)
/* switch back the user state */
dpm_state = rdev->pm.dpm.user_state;
return err;
}
mutex_lock(&rdev->pm.mutex);
if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
rdev->pm.dpm.thermal_active = true;
else
rdev->pm.dpm.thermal_active = false;
rdev->pm.dpm.state = dpm_state;
mutex_unlock(&rdev->pm.mutex);
 
radeon_pm_compute_clocks(rdev);
}
 
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
enum radeon_pm_state_type dpm_state)
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
int i;
struct radeon_ps *ps;
u32 ui_class;
bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
true : false;
 
/* check if the vblank period is too short to adjust the mclk */
if (single_display && rdev->asic->dpm.vblank_too_short) {
if (radeon_dpm_vblank_too_short(rdev))
single_display = false;
}
 
/* certain older asics have a separate 3D performance state,
* so try that first if the user selected performance
*/
if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
 
restart_search:
/* Pick the best power state based on current conditions */
for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
ps = &rdev->pm.dpm.ps[i];
ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
switch (dpm_state) {
/* user states */
case POWER_STATE_TYPE_BATTERY:
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
if (single_display)
return ps;
} else
return ps;
}
break;
case POWER_STATE_TYPE_BALANCED:
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
if (single_display)
return ps;
} else
return ps;
}
break;
case POWER_STATE_TYPE_PERFORMANCE:
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
if (single_display)
return ps;
} else
return ps;
}
break;
/* internal states */
case POWER_STATE_TYPE_INTERNAL_UVD:
if (rdev->pm.dpm.uvd_ps)
return rdev->pm.dpm.uvd_ps;
else
break;
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_UVD_HD:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_BOOT:
return rdev->pm.dpm.boot_ps;
case POWER_STATE_TYPE_INTERNAL_THERMAL:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_ACPI:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_ULV:
if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_3DPERF:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
return ps;
break;
default:
break;
}
}
/* use a fallback state if we didn't match */
switch (dpm_state) {
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
goto restart_search;
case POWER_STATE_TYPE_INTERNAL_UVD_HD:
case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
if (rdev->pm.dpm.uvd_ps) {
return rdev->pm.dpm.uvd_ps;
} else {
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
goto restart_search;
}
case POWER_STATE_TYPE_INTERNAL_THERMAL:
dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
goto restart_search;
case POWER_STATE_TYPE_INTERNAL_ACPI:
dpm_state = POWER_STATE_TYPE_BATTERY;
goto restart_search;
case POWER_STATE_TYPE_BATTERY:
case POWER_STATE_TYPE_BALANCED:
case POWER_STATE_TYPE_INTERNAL_3DPERF:
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
goto restart_search;
default:
break;
}
 
return NULL;
}
 
static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
void radeon_pm_suspend(struct radeon_device *rdev)
{
int i;
struct radeon_ps *ps;
enum radeon_pm_state_type dpm_state;
int ret;
 
/* if dpm init failed */
if (!rdev->pm.dpm_enabled)
return;
 
if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
/* add other state override checks here */
if ((!rdev->pm.dpm.thermal_active) &&
(!rdev->pm.dpm.uvd_active))
rdev->pm.dpm.state = rdev->pm.dpm.user_state;
}
dpm_state = rdev->pm.dpm.state;
 
ps = radeon_dpm_pick_power_state(rdev, dpm_state);
if (ps)
rdev->pm.dpm.requested_ps = ps;
else
return;
 
/* no need to reprogram if nothing changed unless we are on BTC+ */
if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
/* vce just modifies an existing state so force a change */
if (ps->vce_active != rdev->pm.dpm.vce_active)
goto force;
if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
/* for pre-BTC and APUs if the num crtcs changed but state is the same,
* all we need to do is update the display configuration.
*/
if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
/* update display watermarks based on new power state */
radeon_bandwidth_update(rdev);
/* update displays */
radeon_dpm_display_configuration_changed(rdev);
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
}
return;
} else {
/* for BTC+ if the num crtcs hasn't changed and state is the same,
* nothing to do, if the num crtcs is > 1 and state is the same,
* update display configuration.
*/
if (rdev->pm.dpm.new_active_crtcs ==
rdev->pm.dpm.current_active_crtcs) {
return;
} else {
if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
(rdev->pm.dpm.new_active_crtc_count > 1)) {
/* update display watermarks based on new power state */
radeon_bandwidth_update(rdev);
/* update displays */
radeon_dpm_display_configuration_changed(rdev);
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
return;
}
}
}
}
 
force:
if (radeon_dpm == 1) {
printk("switching from power state:\n");
radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
printk("switching to power state:\n");
radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
}
 
mutex_lock(&rdev->ddev->struct_mutex);
// down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
 
/* update whether vce is active */
ps->vce_active = rdev->pm.dpm.vce_active;
 
ret = radeon_dpm_pre_set_power_state(rdev);
if (ret)
goto done;
 
/* update display watermarks based on new power state */
radeon_bandwidth_update(rdev);
/* update displays */
radeon_dpm_display_configuration_changed(rdev);
 
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
 
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
if (ring->ready)
radeon_fence_wait_empty(rdev, i);
}
 
/* program the new power state */
radeon_dpm_set_power_state(rdev);
 
/* update current power state */
rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
 
radeon_dpm_post_set_power_state(rdev);
 
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active) {
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
/* force low perf level for thermal */
radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
/* save the user's level */
rdev->pm.dpm.forced_level = level;
} else {
/* otherwise, user selected level */
radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
}
}
 
done:
mutex_unlock(&rdev->ring_lock);
// up_write(&rdev->pm.mclk_lock);
mutex_unlock(&rdev->ddev->struct_mutex);
}
 
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
enum radeon_pm_state_type dpm_state;
 
if (rdev->asic->dpm.powergate_uvd) {
mutex_lock(&rdev->pm.mutex);
/* don't powergate anything if we
have active but paused streams */
enable |= rdev->pm.dpm.sd > 0;
enable |= rdev->pm.dpm.hd > 0;
/* enable/disable UVD */
radeon_dpm_powergate_uvd(rdev, !enable);
mutex_unlock(&rdev->pm.mutex);
} else {
if (enable) {
mutex_lock(&rdev->pm.mutex);
rdev->pm.dpm.uvd_active = true;
/* disable this for now */
#if 0
if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
else
#endif
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
rdev->pm.dpm.state = dpm_state;
mutex_unlock(&rdev->pm.mutex);
} else {
mutex_lock(&rdev->pm.mutex);
rdev->pm.dpm.uvd_active = false;
mutex_unlock(&rdev->pm.mutex);
}
 
radeon_pm_compute_clocks(rdev);
}
}
 
void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
{
if (enable) {
mutex_lock(&rdev->pm.mutex);
rdev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */
rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
mutex_unlock(&rdev->pm.mutex);
} else {
mutex_lock(&rdev->pm.mutex);
rdev->pm.dpm.vce_active = false;
mutex_unlock(&rdev->pm.mutex);
}
 
radeon_pm_compute_clocks(rdev);
}
 
static void radeon_pm_suspend_old(struct radeon_device *rdev)
{
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
992,44 → 482,11
}
mutex_unlock(&rdev->pm.mutex);
 
// cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}
 
static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
void radeon_pm_resume(struct radeon_device *rdev)
{
mutex_lock(&rdev->pm.mutex);
/* disable dpm */
radeon_dpm_disable(rdev);
/* reset the power state */
rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
rdev->pm.dpm_enabled = false;
mutex_unlock(&rdev->pm.mutex);
}
 
void radeon_pm_suspend(struct radeon_device *rdev)
{
if (rdev->pm.pm_method == PM_METHOD_DPM)
radeon_pm_suspend_dpm(rdev);
else
radeon_pm_suspend_old(rdev);
}
 
static void radeon_pm_resume_old(struct radeon_device *rdev)
{
/* set up the default clocks if the MC ucode is loaded */
if ((rdev->family >= CHIP_BARTS) &&
(rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_vddci)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}
/* asic init will reset the default power state */
mutex_lock(&rdev->pm.mutex);
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
1036,10 → 493,8
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
if (rdev->pm.power_state) {
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
}
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
1050,51 → 505,12
radeon_pm_compute_clocks(rdev);
}
 
static void radeon_pm_resume_dpm(struct radeon_device *rdev)
int radeon_pm_init(struct radeon_device *rdev)
{
int ret;
 
/* asic init will reset to the boot state */
mutex_lock(&rdev->pm.mutex);
rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
radeon_dpm_setup_asic(rdev);
ret = radeon_dpm_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
if (ret)
goto dpm_resume_fail;
rdev->pm.dpm_enabled = true;
return;
 
dpm_resume_fail:
DRM_ERROR("radeon: dpm resume failed\n");
if ((rdev->family >= CHIP_BARTS) &&
(rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_vddci)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}
}
 
void radeon_pm_resume(struct radeon_device *rdev)
{
if (rdev->pm.pm_method == PM_METHOD_DPM)
radeon_pm_resume_dpm(rdev);
else
radeon_pm_resume_old(rdev);
}
 
static int radeon_pm_init_old(struct radeon_device *rdev)
{
int ret;
 
/* default to profile method */
rdev->pm.pm_method = PM_METHOD_PROFILE;
rdev->pm.profile = PM_PROFILE_DEFAULT;
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
1113,22 → 529,7
radeon_combios_get_power_modes(rdev);
radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
/* set up the default clocks if the MC ucode is loaded */
if ((rdev->family >= CHIP_BARTS) &&
(rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_vddci)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}
}
 
/* set up the internal thermal sensor if applicable */
ret = radeon_hwmon_init(rdev);
1135,12 → 536,8
if (ret)
return ret;
 
// INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
 
if (rdev->pm.num_power_states > 1) {
/* where's the best place to put these? */
 
 
DRM_INFO("radeon: power management initialized\n");
}
 
1147,165 → 544,8
return 0;
}
 
static void radeon_dpm_print_power_states(struct radeon_device *rdev)
void radeon_pm_fini(struct radeon_device *rdev)
{
int i;
 
for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
printk("== power state %d ==\n", i);
radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
}
}
 
static int radeon_pm_init_dpm(struct radeon_device *rdev)
{
int ret;
 
/* default to balanced state */
rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
rdev->pm.default_sclk = rdev->clock.default_sclk;
rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
 
if (rdev->bios && rdev->is_atom_bios)
radeon_atombios_get_power_modes(rdev);
else
return -EINVAL;
 
/* set up the internal thermal sensor if applicable */
ret = radeon_hwmon_init(rdev);
if (ret)
return ret;
 
INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
mutex_lock(&rdev->pm.mutex);
radeon_dpm_init(rdev);
rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
if (radeon_dpm == 1)
radeon_dpm_print_power_states(rdev);
radeon_dpm_setup_asic(rdev);
ret = radeon_dpm_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
if (ret)
goto dpm_failed;
rdev->pm.dpm_enabled = true;
 
DRM_INFO("radeon: dpm initialized\n");
 
return 0;
 
dpm_failed:
rdev->pm.dpm_enabled = false;
if ((rdev->family >= CHIP_BARTS) &&
(rdev->family <= CHIP_CAYMAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_vddci)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}
DRM_ERROR("radeon: dpm initialization failed\n");
return ret;
}
 
int radeon_pm_init(struct radeon_device *rdev)
{
/* enable dpm on rv6xx+ */
switch (rdev->family) {
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV620:
case CHIP_RV635:
case CHIP_RV670:
case CHIP_RS780:
case CHIP_RS880:
case CHIP_RV770:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
else if ((rdev->family >= CHIP_RV770) &&
(!(rdev->flags & RADEON_IS_IGP)) &&
(!rdev->smc_fw))
rdev->pm.pm_method = PM_METHOD_PROFILE;
else if (radeon_dpm == 1)
rdev->pm.pm_method = PM_METHOD_DPM;
else
rdev->pm.pm_method = PM_METHOD_PROFILE;
break;
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_JUNIPER:
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
case CHIP_CAYMAN:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
case CHIP_BONAIRE:
case CHIP_KABINI:
case CHIP_KAVERI:
case CHIP_HAWAII:
case CHIP_MULLINS:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
else if ((rdev->family >= CHIP_RV770) &&
(!(rdev->flags & RADEON_IS_IGP)) &&
(!rdev->smc_fw))
rdev->pm.pm_method = PM_METHOD_PROFILE;
else if (radeon_dpm == 0)
rdev->pm.pm_method = PM_METHOD_PROFILE;
else
rdev->pm.pm_method = PM_METHOD_DPM;
break;
default:
/* default to profile method */
rdev->pm.pm_method = PM_METHOD_PROFILE;
break;
}
 
if (rdev->pm.pm_method == PM_METHOD_DPM)
return radeon_pm_init_dpm(rdev);
else
return radeon_pm_init_old(rdev);
}
 
int radeon_pm_late_init(struct radeon_device *rdev)
{
int ret = 0;
 
if (rdev->pm.pm_method == PM_METHOD_DPM) {
mutex_lock(&rdev->pm.mutex);
ret = radeon_dpm_late_enable(rdev);
mutex_unlock(&rdev->pm.mutex);
}
return ret;
}
 
static void radeon_pm_fini_old(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
1325,32 → 565,10
}
 
radeon_hwmon_fini(rdev);
kfree(rdev->pm.power_state);
}
 
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
mutex_lock(&rdev->pm.mutex);
radeon_dpm_disable(rdev);
mutex_unlock(&rdev->pm.mutex);
}
radeon_dpm_fini(rdev);
 
radeon_hwmon_fini(rdev);
kfree(rdev->pm.power_state);
}
 
void radeon_pm_fini(struct radeon_device *rdev)
{
if (rdev->pm.pm_method == PM_METHOD_DPM)
radeon_pm_fini_dpm(rdev);
else
radeon_pm_fini_old(rdev);
}
 
static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
1362,7 → 580,6
 
rdev->pm.active_crtcs = 0;
rdev->pm.active_crtc_count = 0;
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
1371,7 → 588,6
rdev->pm.active_crtc_count++;
}
}
}
 
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
radeon_pm_update_profile(rdev);
1422,51 → 638,6
mutex_unlock(&rdev->pm.mutex);
}
 
static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
 
if (!rdev->pm.dpm_enabled)
return;
 
mutex_lock(&rdev->pm.mutex);
 
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled) {
rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
rdev->pm.dpm.new_active_crtc_count++;
}
}
}
 
/* update battery/ac status */
if (power_supply_is_system_supplied() > 0)
rdev->pm.dpm.ac_power = true;
else
rdev->pm.dpm.ac_power = false;
 
radeon_dpm_change_power_state_locked(rdev);
 
mutex_unlock(&rdev->pm.mutex);
 
}
 
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
if (rdev->pm.pm_method == PM_METHOD_DPM)
radeon_pm_compute_clocks_dpm(rdev);
else
radeon_pm_compute_clocks_old(rdev);
}
 
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
int crtc, vpos, hpos, vbl_status;
1477,7 → 648,7
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
!(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
1509,19 → 680,7
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_device *ddev = rdev->ddev;
 
if ((rdev->flags & RADEON_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
} else if (rdev->pm.dpm_enabled) {
mutex_lock(&rdev->pm.mutex);
if (rdev->asic->dpm.debugfs_print_current_performance_level)
radeon_dpm_debugfs_print_current_performance_level(rdev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&rdev->pm.mutex);
} else {
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
1535,7 → 694,6
seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
if (rdev->asic->pm.get_pcie_lanes)
seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
}
 
return 0;
}
/drivers/video/drm/radeon/radeon_reg.h
57,7 → 57,6
#include "evergreen_reg.h"
#include "ni_reg.h"
#include "si_reg.h"
#include "cik_reg.h"
 
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
/drivers/video/drm/radeon/radeon_ring.c
24,12 → 24,281
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
* Christian König
* Christian Konig
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
 
/*
* IB
* IBs (Indirect Buffers) are areas of GPU accessible memory where
* commands are stored. You can put a pointer to the IB in the
* command ring and the hw will fetch the commands from the IB
* and execute them. Generally userspace acceleration drivers
* produce command buffers which are sent to the kernel and
* put in IBs for execution by the requested ring.
*/
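A minimal sketch (not driver code) of the lifecycle this comment describes: request an IB from the suballocator, fill it, schedule it and free it. The ring index, the 256-byte size and the single nop packet are illustrative assumptions.

/* Sketch: submit a one-dword nop IB on the GFX ring and wait for it. */
static int example_submit_nop_ib(struct radeon_device *rdev)
{
	struct radeon_ib ib;
	int r;

	/* 256 bytes from the IB suballocator, no VM in this sketch */
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
	if (r)
		return r;

	/* a single nop packet is enough to exercise the path */
	ib.ptr[0] = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].nop;
	ib.length_dw = 1;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (!r)
		/* schedule emitted a fence on our behalf; wait for it */
		r = radeon_fence_wait(ib.fence, false);

	radeon_ib_free(rdev, &ib);
	return r;
}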
static int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
/**
* radeon_ib_get - request an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ring: ring index the IB is associated with
* @ib: IB object returned
* @size: requested IB size
*
* Request an IB (all asics). IBs are allocated using the
* suballocator.
* Returns 0 on success, error on failure.
*/
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
{
int i, r;
 
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}
 
r = radeon_semaphore_create(rdev, &ib->semaphore);
if (r) {
return r;
}
 
ib->ring = ring;
ib->fence = NULL;
ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
ib->vm = vm;
if (vm) {
/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
* space and soffset is the offset inside the pool bo
*/
ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
} else {
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
ib->is_const_ib = false;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
ib->sync_to[i] = NULL;
 
return 0;
}
 
/**
* radeon_ib_free - free an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ib: IB object to free
*
* Free an IB (all asics).
*/
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
 
/**
* radeon_ib_sync_to - sync to fence before executing the IB
*
* @ib: IB object to add fence to
* @fence: fence to sync to
*
* Sync to the fence before executing the IB
*/
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
{
struct radeon_fence *other;
 
if (!fence)
return;
 
other = ib->sync_to[fence->ring];
ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
 
/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
* @ib: IB object to schedule
* @const_ib: Const IB to schedule (SI only)
*
* Schedule an IB on the associated ring (all asics).
* Returns 0 on success, error on failure.
*
* On SI, there are two parallel engines fed from the primary ring,
* the CE (Constant Engine) and the DE (Drawing Engine). Since
* resource descriptors have moved to memory, the CE allows you to
* prime the caches while the DE is updating register state so that
* the resource descriptors will be already in cache when the draw is
* processed. To accomplish this, the userspace driver submits two
* IBs, one for the CE and one for the DE. If there is a CE IB (called
* a CONST_IB), it will be put on the ring prior to the DE IB. Prior
* to SI there was just a DE IB.
*/
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
bool need_sync = false;
int i, r = 0;
 
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothing's in the IB we should report. */
dev_err(rdev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
 
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_fence *fence = ib->sync_to[i];
if (radeon_fence_need_sync(fence, ib->ring)) {
need_sync = true;
radeon_semaphore_sync_rings(rdev, ib->semaphore,
fence->ring, ib->ring);
radeon_fence_note_sync(fence, ib->ring);
}
}
/* immediately free semaphore when we don't need to sync */
if (!need_sync) {
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
}
/* if we can't remember our last VM flush then flush now! */
/* XXX figure out why we have to flush for every IB */
if (ib->vm /*&& !ib->vm->last_flush*/) {
radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
}
if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
}
radeon_ring_ib_execute(rdev, ib->ring, ib);
r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
return r;
}
if (const_ib) {
const_ib->fence = radeon_fence_ref(ib->fence);
}
/* we just flushed the VM, remember that */
if (ib->vm && !ib->vm->last_flush) {
ib->vm->last_flush = radeon_fence_ref(ib->fence);
}
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
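From the caller's side, the SI dual-engine submission described above reduces to passing both IBs in one call (a hedged sketch; preparation of the two IBs is assumed to have happened elsewhere):

/* Sketch: on SI, the CE IB goes on the ring ahead of the DE IB and
 * ends up sharing the DE IB's fence.
 */
static int example_submit_si_ibs(struct radeon_device *rdev,
				 struct radeon_ib *de_ib,
				 struct radeon_ib *const_ib)
{
	return radeon_ib_schedule(rdev, de_ib, const_ib);
}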
 
/**
* radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Initialize the suballocator to manage a pool of memory
* for use as IBs (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ib_pool_init(struct radeon_device *rdev)
{
int r;
 
if (rdev->ib_pool_ready) {
return 0;
}
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
}
 
r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
if (r) {
return r;
}
 
rdev->ib_pool_ready = true;
if (radeon_debugfs_sa_init(rdev)) {
dev_err(rdev->dev, "failed to register debugfs file for SA\n");
}
return 0;
}
 
/**
* radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Tear down the suballocator managing the pool of memory
* for use as IBs (all asics).
*/
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
if (rdev->ib_pool_ready) {
radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
rdev->ib_pool_ready = false;
}
}
 
/**
* radeon_ib_ring_tests - test IBs on the rings
*
* @rdev: radeon_device pointer
*
* Test an IB (Indirect Buffer) on each ring.
* If the test fails, disable the ring.
* Returns 0 on success, error if the primary GFX ring
* IB test fails.
*/
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
unsigned i;
int r;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_ring *ring = &rdev->ring[i];
 
if (!ring->ready)
continue;
 
r = radeon_ib_test(rdev, i, ring);
if (r) {
ring->ready = false;
 
if (i == RADEON_RING_TYPE_GFX_INDEX) {
/* oh, oh, that's really bad */
DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
rdev->accel_working = false;
return r;
 
} else {
/* still not good, but we can live with it */
DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
}
}
}
return 0;
}
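A typical bring-up sequence pairs the pool init with the ring tests (a sketch only; the surrounding device init code is assumed):

/* Sketch: start acceleration - create the IB pool, then test the rings.
 * radeon_ib_ring_tests() disables any ring whose test fails and is
 * fatal only for the GFX ring.
 */
static int example_start_accel(struct radeon_device *rdev)
{
	int r;

	r = radeon_ib_pool_init(rdev);
	if (r)
		return r;

	return radeon_ib_ring_tests(rdev);
}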
 
/*
* Rings
* Most engines on the GPU are fed via ring buffers. Ring
* buffers are areas of GPU accessible memory that the host
98,17 → 367,19
*/
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
u32 rptr;
 
if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
else
rptr = RREG32(ring->rptr_reg);
ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
/* This works because ring_size is a power of 2 */
ring->ring_free_dw = rptr + (ring->ring_size / 4);
ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
ring->ring_free_dw -= ring->wptr;
ring->ring_free_dw &= ring->ptr_mask;
if (!ring->ring_free_dw) {
/* this is an empty ring */
ring->ring_free_dw = ring->ring_size / 4;
/* update lockup info to avoid false positive */
radeon_ring_lockup_update(rdev, ring);
}
}
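A worked example of the power-of-two arithmetic above, with illustrative numbers (a 4 KiB ring gives 1024 dwords and a ptr_mask of 1023):

/* Standalone sketch of the masked free-space computation. */
#include <stdint.h>

static uint32_t example_ring_free_dw(uint32_t rptr, uint32_t wptr,
				     uint32_t ring_size_bytes)
{
	uint32_t dw = ring_size_bytes / 4;	/* ring size in dwords */
	uint32_t mask = dw - 1;			/* valid because dw is 2^n */

	/* e.g. rptr=100, wptr=900, 4096 bytes -> (100+1024-900)&1023 = 224;
	 * rptr==wptr yields 0, which the driver treats as an empty ring.
	 */
	return (rptr + dw - wptr) & mask;
}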
 
132,6 → 403,12
/* Align requested size with padding so unlock_commit can
* pad safely */
radeon_ring_free_size(rdev, ring);
if (ring->ring_free_dw == (ring->ring_size / 4)) {
/* This is an empty ring update lockup info to avoid
* false positive.
*/
radeon_ring_lockup_update(ring);
}
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
while (ndw > (ring->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, ring);
138,7 → 415,7
if (ndw < ring->ring_free_dw) {
break;
}
r = radeon_fence_wait_next(rdev, ring->idx);
r = radeon_fence_wait_next_locked(rdev, ring->idx);
if (r)
return r;
}
177,30 → 454,19
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @hdp_flush: Whether or not to perform an HDP cache flush
*
* Update the wptr (write pointer) to tell the GPU to
* execute new commands on the ring buffer (all asics).
*/
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
bool hdp_flush)
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
/* If we are emitting the HDP flush via the ring buffer, we need to
* do it before padding.
*/
if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
mb();
/* If we are emitting the HDP flush via MMIO, we need to do it after
* all CPU writes to VRAM finished.
*/
if (hdp_flush && rdev->asic->mmio_hdp_flush)
rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring);
DRM_MEMORYBARRIER();
WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
(void)RREG32(ring->wptr_reg);
}
 
/**
209,14 → 475,12
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @hdp_flush: Whether or not to perform an HDP cache flush
*
* Call radeon_ring_commit() then unlock the ring (all asics).
*/
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
bool hdp_flush)
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev, ring, hdp_flush);
radeon_ring_commit(rdev, ring);
mutex_unlock(&rdev->ring_lock);
}
 
246,6 → 510,29
}
 
/**
* radeon_ring_force_activity - add some nop packets to the ring
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Add some nop packets to the ring to force activity (all asics).
* Used for lockup detection to see if the rptr is advancing.
*/
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
 
radeon_ring_free_size(rdev, ring);
if (ring->rptr == ring->wptr) {
r = radeon_ring_alloc(rdev, ring, 1);
if (!r) {
radeon_ring_write(ring, ring->nop);
radeon_ring_commit(rdev, ring);
}
}
}
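Combined with radeon_ring_test_lockup() below, a watchdog-style caller would first force some activity so an idle CP is not mistaken for a stalled one (a sketch against the signatures shown in this revision):

/* Sketch: poll one ring for a lockup. */
static bool example_ring_stalled(struct radeon_device *rdev,
				 struct radeon_ring *ring)
{
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}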
 
/**
* radeon_ring_lockup_update - update lockup variables
*
* @ring: radeon_ring structure holding ring information
252,11 → 539,10
*
* Update the last rptr value and timestamp (all asics).
*/
void radeon_ring_lockup_update(struct radeon_device *rdev,
struct radeon_ring *ring)
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
atomic64_set(&ring->last_activity, jiffies_64);
ring->last_rptr = ring->rptr;
ring->last_activity = GetTimerTicks();
}
 
/**
264,23 → 550,42
* @rdev: radeon device structure
* @ring: radeon_ring structure holding ring information
*
*/
* We don't need to initialize the lockup tracking information, as we will
* either have the CP rptr at a different value or a jiffies wrap-around,
* either of which forces initialization of the lockup tracking information.
*
* A possible false positive is if we get called after a while and
* last_cp_rptr == the current CP rptr; even if it's unlikely, it might
* happen. To avoid this, if the elapsed time since the last call is bigger
* than 2 seconds we return false and update the tracking information. Due
* to this the caller must call radeon_ring_test_lockup several times in
* less than 2 seconds for a lockup to be reported; the fencing code should
* be cautious about that.
*
* The caller should write to the ring to force the CP to do something, so
* we don't get a false positive when the CP simply has nothing to do.
*
**/
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
uint64_t last = atomic64_read(&ring->last_activity);
uint64_t elapsed;
unsigned long cjiffies, elapsed;
uint32_t rptr;
 
if (rptr != atomic_read(&ring->last_rptr)) {
/* ring is still working, no lockup */
radeon_ring_lockup_update(rdev, ring);
cjiffies = GetTimerTicks();
if (!time_after(cjiffies, ring->last_activity)) {
/* likely a wrap around */
radeon_ring_lockup_update(ring);
return false;
}
 
elapsed = jiffies_to_msecs(jiffies_64 - last);
rptr = RREG32(ring->rptr_reg);
ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
if (ring->rptr != ring->last_rptr) {
/* CP is still working no lockup */
radeon_ring_lockup_update(ring);
return false;
}
elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
ring->idx, elapsed);
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
/* give a chance to the GPU ... */
376,7 → 681,7
radeon_ring_write(ring, data[i]);
}
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
kfree(data);
return 0;
}
388,6 → 693,10
* @ring: radeon_ring structure holding ring information
* @ring_size: size of the ring
* @rptr_offs: offset of the rptr writeback location in the WB buffer
* @rptr_reg: MMIO offset of the rptr register
* @wptr_reg: MMIO offset of the wptr register
* @ptr_reg_shift: bit offset of the rptr/wptr values
* @ptr_reg_mask: bit mask of the rptr/wptr values
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
394,17 → 703,22
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
unsigned rptr_offs, u32 nop)
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
int r;
 
ring->ring_size = ring_size;
ring->rptr_offs = rptr_offs;
ring->rptr_reg = rptr_reg;
ring->wptr_reg = wptr_reg;
ring->ptr_reg_shift = ptr_reg_shift;
ring->ptr_reg_mask = ptr_reg_mask;
ring->nop = nop;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0,
RADEON_GEM_DOMAIN_GTT,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
438,7 → 752,7
if (radeon_debugfs_ring_init(rdev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
radeon_ring_lockup_update(rdev, ring);
radeon_ring_lockup_update(ring);
return 0;
}
 
485,51 → 799,31
struct radeon_device *rdev = dev->dev_private;
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
 
uint32_t rptr, wptr, rptr_next;
unsigned count, i, j;
u32 tmp;
 
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
 
wptr = radeon_ring_get_wptr(rdev, ring);
seq_printf(m, "wptr: 0x%08x [%5d]\n",
wptr, wptr);
 
rptr = radeon_ring_get_rptr(rdev, ring);
seq_printf(m, "rptr: 0x%08x [%5d]\n",
rptr, rptr);
 
tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
if (ring->rptr_save_reg) {
rptr_next = RREG32(ring->rptr_save_reg);
seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
ring->rptr_save_reg, rptr_next, rptr_next);
} else
rptr_next = ~0;
 
seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
ring->wptr, ring->wptr);
seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
ring->last_semaphore_signal_addr);
seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
ring->last_semaphore_wait_addr);
seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
RREG32(ring->rptr_save_reg));
}
seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
 
if (!ring->ready)
return 0;
 
/* print 8 dw before current rptr as often it's the last executed
* packet that is the root issue
*/
i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
for (j = 0; j <= (count + 32); j++) {
seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
if (rptr == i)
seq_puts(m, " *");
if (rptr_next == i)
seq_puts(m, " #");
seq_puts(m, "\n");
seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
i = (i + 1) & ring->ptr_mask;
}
return 0;
541,8 → 835,6
static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX;
static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX;
 
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
551,10 → 843,24
{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
{"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
{"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index},
{"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};
 
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
 
radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
 
return 0;
 
}
 
static struct drm_info_list radeon_debugfs_sa_list[] = {
{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};
 
#endif
 
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
576,3 → 882,12
#endif
return 0;
}
 
static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
return 0;
#endif
}
/drivers/video/drm/radeon/radeon_semaphore.c
29,20 → 29,19
*/
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_trace.h"
 
 
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
uint32_t *cpu_addr;
int i, r;
int r;
 
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
return -ENOMEM;
}
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
8 * RADEON_NUM_SYNCS, 8);
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
&(*semaphore)->sa_bo, 8, 8, true);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
50,141 → 49,56
}
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
 
cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
for (i = 0; i < RADEON_NUM_SYNCS; ++i)
cpu_addr[i] = 0;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i)
(*semaphore)->sync_to[i] = NULL;
 
*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
return 0;
}
 
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
trace_radeon_semaphore_signale(ridx, semaphore);
 
if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
--semaphore->waiters;
 
/* for debugging lockup only, used by sysfs debug files */
ring->last_semaphore_signal_addr = semaphore->gpu_addr;
return true;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
}
return false;
}
 
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
trace_radeon_semaphore_wait(ridx, semaphore);
 
if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
++semaphore->waiters;
 
/* for debugging lockup only, used by sysfs debug files */
ring->last_semaphore_wait_addr = semaphore->gpu_addr;
return true;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
return false;
}
 
/**
* radeon_semaphore_sync_to - use the semaphore to sync to a fence
*
* @semaphore: semaphore object to add fence to
* @fence: fence to sync to
*
* Sync to the fence using this semaphore object
*/
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
struct radeon_fence *fence)
{
struct radeon_fence *other;
 
if (!fence)
return;
 
other = semaphore->sync_to[fence->ring];
semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
 
/**
* radeon_semaphore_sync_rings - sync ring to all registered fences
*
* @rdev: radeon_device pointer
* @semaphore: semaphore object to use for sync
* @ring: ring that needs sync
*
* Ensure that all registered fences are signaled before letting
* the ring continue. The caller must hold the ring lock.
*/
/* caller must hold ring lock */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int ring)
int signaler, int waiter)
{
unsigned count = 0;
int i, r;
int r;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_fence *fence = semaphore->sync_to[i];
/* no need to signal and wait on the same ring */
if (signaler == waiter) {
return 0;
}
 
/* check if we really need to sync */
if (!radeon_fence_need_sync(fence, ring))
continue;
 
/* prevent GPU deadlocks */
if (!rdev->ring[i].ready) {
dev_err(rdev->dev, "Syncing to a disabled ring!");
if (!rdev->ring[signaler].ready) {
dev_err(rdev->dev, "Trying to sync to a disabled ring!");
return -EINVAL;
}
 
if (++count > RADEON_NUM_SYNCS) {
/* not enough room, wait manually */
r = radeon_fence_wait(fence, false);
if (r)
return r;
continue;
}
 
/* allocate enough space for sync command */
r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
if (r) {
return r;
}
radeon_semaphore_emit_signal(rdev, signaler, semaphore);
radeon_ring_commit(rdev, &rdev->ring[signaler]);
 
/* emit the signal semaphore */
if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
/* signaling wasn't successful wait manually */
radeon_ring_undo(&rdev->ring[i]);
r = radeon_fence_wait(fence, false);
if (r)
return r;
continue;
}
 
/* we assume caller has already allocated space on waiters ring */
if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
/* waiting wasn't successful wait manually */
radeon_ring_undo(&rdev->ring[i]);
r = radeon_fence_wait(fence, false);
if (r)
return r;
continue;
}
radeon_semaphore_emit_wait(rdev, waiter, semaphore);
 
radeon_ring_commit(rdev, &rdev->ring[i], false);
radeon_fence_note_sync(fence, ring);
/* for debugging lockup only, used by sysfs debug files */
rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
 
semaphore->gpu_addr += 8;
}
 
return 0;
}
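A typical consumer of this API (the IB code earlier in this diff is one) creates a semaphore, registers the fences it depends on, and lets sync_rings emit the signal/wait pairs. A hedged sketch against the three-argument sync_rings shown above; the NULL passed to radeon_semaphore_free() is for brevity, real callers pass the fence of the work that used the semaphore:

/* Sketch: make `ring` wait for work previously fenced on other rings. */
static int example_sync_before_use(struct radeon_device *rdev, int ring,
				   struct radeon_fence *dep_fence)
{
	struct radeon_semaphore *sem;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	radeon_semaphore_sync_to(sem, dep_fence);	  /* remember the fence */
	r = radeon_semaphore_sync_rings(rdev, sem, ring); /* emit signal/wait  */

	radeon_semaphore_free(rdev, &sem, NULL);
	return r;
}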
 
/drivers/video/drm/radeon/radeon_ttm.c
44,7 → 44,6
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
 
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
 
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
75,6 → 74,8
struct drm_global_reference *global_ref;
int r;
 
ENTER();
 
rdev->mman.mem_global_referenced = false;
global_ref = &rdev->mman.mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
103,10 → 104,14
}
 
rdev->mman.mem_global_referenced = true;
 
LEAVE();
 
return 0;
}
 
 
 
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
117,6 → 122,8
{
struct radeon_device *rdev;
 
ENTER();
 
rdev = radeon_get_rdev(bdev);
 
switch (type) {
134,7 → 141,7
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
if (!rdev->ddev->agp) {
if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
160,6 → 167,9
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
 
LEAVE();
 
return 0;
}
 
208,265 → 218,6
new_mem->mm_node = NULL;
}
 
static int radeon_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
int r, ridx;
 
rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
old_start = old_mem->start << PAGE_SHIFT;
new_start = new_mem->start << PAGE_SHIFT;
 
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
old_start += rdev->mc.vram_start;
break;
case TTM_PL_TT:
old_start += rdev->mc.gtt_start;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
switch (new_mem->mem_type) {
case TTM_PL_VRAM:
new_start += rdev->mc.vram_start;
break;
case TTM_PL_TT:
new_start += rdev->mc.gtt_start;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
if (!rdev->ring[ridx].ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
 
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
/* sync other rings */
fence = bo->sync_obj;
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
&fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
 
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
u32 placements;
struct ttm_placement placement;
int r;
 
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.fpfn = 0;
placement.lpfn = 0;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
 
r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
if (unlikely(r)) {
goto out_cleanup;
}
 
r = ttm_tt_bind(bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
}
 
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
u32 placements;
int r;
 
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.fpfn = 0;
placement.lpfn = 0;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
return r;
}
r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
}
 
static int radeon_bo_move(struct ttm_buffer_object *bo,
bool evict, bool interruptible,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
int r;
 
rdev = radeon_get_rdev(bo->bdev);
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
radeon_move_null(bo, new_mem);
return 0;
}
if ((old_mem->mem_type == TTM_PL_TT &&
new_mem->mem_type == TTM_PL_SYSTEM) ||
(old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_TT)) {
/* bind is enough */
radeon_move_null(bo, new_mem);
return 0;
}
if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
rdev->asic->copy.copy == NULL) {
/* use memcpy */
goto memcpy;
}
 
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
no_wait_gpu, new_mem);
} else {
r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
}
 
if (r) {
memcpy:
r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
if (r) {
return r;
}
}
 
/* update statistics */
// atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
return 0;
}
 
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct radeon_device *rdev = radeon_get_rdev(bdev);
 
mem->bus.addr = NULL;
mem->bus.offset = 0;
mem->bus.size = mem->num_pages << PAGE_SHIFT;
mem->bus.base = 0;
mem->bus.is_iomem = false;
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* system memory */
return 0;
case TTM_PL_TT:
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
/* RADEON_IS_AGP is set only if AGP is active */
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = rdev->mc.agp_base;
mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
}
#endif
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
return -EINVAL;
mem->bus.base = rdev->mc.aper_base;
mem->bus.is_iomem = true;
#ifdef __alpha__
/*
* Alpha: use bus.addr to hold the ioremap() return,
* so we can modify bus.base below.
*/
if (mem->placement & TTM_PL_FLAG_WC)
mem->bus.addr =
ioremap_wc(mem->bus.base + mem->bus.offset,
mem->bus.size);
else
mem->bus.addr =
ioremap_nocache(mem->bus.base + mem->bus.offset,
mem->bus.size);
 
/*
* Alpha: Use just the bus offset plus
* the hose/domain memory base for bus.base.
* It then can be used to build PTEs for VRAM
* access, as done in ttm_bo_vm_fault().
*/
mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
rdev->ddev->hose->dense_mem_base;
#endif
break;
default:
return -EINVAL;
}
return 0;
}
 
static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
509,8 → 260,6
struct ttm_mem_reg *bo_mem)
{
struct radeon_ttm_tt *gtt = (void*)ttm;
uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
RADEON_GART_PAGE_WRITE;
int r;
 
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
518,10 → 267,8
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
if (ttm->caching_state == tt_cached)
flags |= RADEON_GART_PAGE_SNOOP;
r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
r = radeon_gart_bind(gtt->rdev, gtt->offset,
ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
542,7 → 289,7
{
struct radeon_ttm_tt *gtt = (void *)ttm;
 
// ttm_dma_tt_fini(&gtt->ttm);
ttm_dma_tt_fini(&gtt->ttm);
kfree(gtt);
}
 
580,98 → 327,24
return &gtt->ttm.ttm;
}
 
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
if (ttm->state != tt_unpopulated)
return 0;
 
if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
return 0;
}
 
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
return ttm_agp_tt_populate(ttm);
}
#endif
 
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
return ttm_dma_populate(&gtt->ttm, rdev->dev);
}
#endif
 
r = ttm_pool_populate(ttm);
if (r) {
return r;
}
 
for (i = 0; i < ttm->num_pages; i++) {
gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
 
}
return 0;
}
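/* Editor's note: upstream radeon checks every pci_map_page() result and
 * unwinds on failure; the port above omits that. A minimal sketch of the
 * missing check inside the loop:
 *
 * if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
 * while (i--)
 * pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
 * PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 * ttm_pool_unpopulate(ttm);
 * return -EFAULT;
 * }
 */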
 
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
if (slave)
return;
 
rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
ttm_agp_tt_unpopulate(ttm);
return;
}
#endif
 
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
return;
}
#endif
 
 
ttm_pool_unpopulate(ttm);
}
 
static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
// .ttm_tt_populate = &radeon_ttm_tt_populate,
// .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
// .invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
.sync_obj_signaled = &radeon_sync_obj_signaled,
.sync_obj_wait = &radeon_sync_obj_wait,
.sync_obj_flush = &radeon_sync_obj_flush,
.sync_obj_unref = &radeon_sync_obj_unref,
.sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
// .evict_flags = &radeon_evict_flags,
// .move = &radeon_bo_move,
// .verify_access = &radeon_verify_access,
// .sync_obj_signaled = &radeon_sync_obj_signaled,
// .sync_obj_wait = &radeon_sync_obj_wait,
// .sync_obj_flush = &radeon_sync_obj_flush,
// .sync_obj_unref = &radeon_sync_obj_unref,
// .sync_obj_ref = &radeon_sync_obj_ref,
// .move_notify = &radeon_bo_move_notify,
// .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
// .io_mem_reserve = &radeon_ttm_io_mem_reserve,
// .io_mem_free = &radeon_ttm_io_mem_free,
};
 
int radeon_ttm_init(struct radeon_device *rdev)
678,6 → 351,8
{
int r;
 
ENTER();
 
r = radeon_ttm_global_init(rdev);
if (r) {
return r;
685,9 → 360,7
/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev,
rdev->mman.bo_global_ref.ref.object,
&radeon_bo_driver,
NULL,
DRM_FILE_PAGE_OFFSET,
&radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
rdev->need_dma32);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
700,26 → 373,25
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
/* Change the size here instead of the init above so only lpfn is affected */
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
r = radeon_bo_create(rdev, 16*1024*1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0,
NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
}
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
if (r)
return r;
r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
radeon_bo_unreserve(rdev->stollen_vga_memory);
if (r) {
radeon_bo_unref(&rdev->stollen_vga_memory);
return r;
}
// r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
// RADEON_GEM_DOMAIN_VRAM,
// NULL, &rdev->stollen_vga_memory);
// if (r) {
// return r;
// }
// r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
// if (r)
// return r;
// r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
// radeon_bo_unreserve(rdev->stollen_vga_memory);
// if (r) {
// radeon_bo_unref(&rdev->stollen_vga_memory);
// return r;
// }
 
DRM_INFO("radeon: %uM of VRAM memory ready\n",
(unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
(unsigned)rdev->mc.real_vram_size / (1024 * 1024));
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
rdev->mc.gtt_size >> PAGE_SHIFT);
if (r) {
728,7 → 400,10
}
DRM_INFO("radeon: %uM of GTT memory ready.\n",
(unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
 
LEAVE();
 
return 0;
}
 
799,34 → 474,3
 
 
 
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_pages)
{
unsigned count;
struct scatterlist *sg;
struct page *page;
u32 len;
int pg_index;
dma_addr_t addr;
 
pg_index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
len = sg->length;
page = sg_page(sg);
addr = sg_dma_address(sg);
 
while (len > 0) {
if (WARN_ON(pg_index >= max_pages))
return -1;
pages[pg_index] = page;
if (addrs)
addrs[pg_index] = addr;
 
page++;
addr += PAGE_SIZE;
len -= PAGE_SIZE;
pg_index++;
}
}
return 0;
}
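/* Editor's sketch of typical use of the helper above (names illustrative):
 * size the flat arrays by the object's page count, then let the helper
 * expand each scatterlist segment into individual pages.
 *
 * int npages = bo_size >> PAGE_SHIFT;
 * struct page **pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
 * dma_addr_t *addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
 * if (drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages))
 * goto too_small; // the table held more pages than the arrays
 */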
/drivers/video/drm/radeon/rdisplay.c
1,8 → 1,8
 
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm.h>
#include <drm_mm.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_object.h"
#include "bitmap.h"
11,7 → 11,7
#include "r100d.h"
 
 
display_t *os_display;
display_t *rdisplay;
 
static cursor_t* __stdcall select_cursor(cursor_t *cursor);
static void __stdcall move_cursor(cursor_t *cursor, int x, int y);
31,10 → 31,10
int i,j;
int r;
 
rdev = (struct radeon_device *)os_display->ddev->dev_private;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
 
r = radeon_bo_create(rdev, CURSOR_WIDTH*CURSOR_HEIGHT*4,
PAGE_SIZE, false, RADEON_GEM_DOMAIN_VRAM, 0, NULL, &cursor->robj);
PAGE_SIZE, false, RADEON_GEM_DOMAIN_VRAM, NULL, &cursor->robj);
 
if (unlikely(r != 0))
return r;
82,7 → 82,7
 
static void radeon_show_cursor()
{
struct radeon_device *rdev = (struct radeon_device *)os_display->ddev->dev_private;
struct radeon_device *rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
 
 
if (ASIC_IS_DCE4(rdev)) {
107,11 → 107,11
cursor_t *old;
uint32_t gpu_addr;
 
rdev = (struct radeon_device *)os_display->ddev->dev_private;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
 
old = os_display->cursor;
old = rdisplay->cursor;
 
os_display->cursor = cursor;
rdisplay->cursor = cursor;
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
 
if (ASIC_IS_DCE4(rdev))
136,7 → 136,7
{
struct radeon_device *rdev;
 
rdev = (struct radeon_device *)os_display->ddev->dev_private;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
 
uint32_t cur_lock;
 
168,7 → 168,7
void __stdcall move_cursor(cursor_t *cursor, int x, int y)
{
struct radeon_device *rdev;
rdev = (struct radeon_device *)os_display->ddev->dev_private;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
 
int hot_x = cursor->hot_x;
int hot_y = cursor->hot_y;
233,26 → 233,26
 
ENTER();
 
os_display = GetDisplay();
rdisplay = GetDisplay();
 
dev = os_display->ddev = rdev->ddev;
dev = rdisplay->ddev = rdev->ddev;
 
ifl = safe_cli();
{
list_for_each_entry(cursor, &os_display->cursors, list)
list_for_each_entry(cursor, &rdisplay->cursors, list)
{
init_cursor(cursor);
};
 
os_display->restore_cursor(0,0);
os_display->init_cursor = init_cursor;
os_display->select_cursor = select_cursor;
os_display->show_cursor = NULL;
os_display->move_cursor = move_cursor;
os_display->restore_cursor = restore_cursor;
os_display->disable_mouse = disable_mouse;
rdisplay->restore_cursor(0,0);
rdisplay->init_cursor = init_cursor;
rdisplay->select_cursor = select_cursor;
rdisplay->show_cursor = NULL;
rdisplay->move_cursor = move_cursor;
rdisplay->restore_cursor = restore_cursor;
rdisplay->disable_mouse = disable_mouse;
 
select_cursor(os_display->cursor);
select_cursor(rdisplay->cursor);
radeon_show_cursor();
};
safe_sti(ifl);
/drivers/video/drm/radeon/rdisplay_kms.c
1,8 → 1,8
 
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm.h>
#include <drm_mm.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_object.h"
#include "drm_fb_helper.h"
10,11 → 10,16
#include "bitmap.h"
#include "display.h"
 
extern struct drm_framebuffer *main_fb;
extern struct drm_gem_object *main_fb_obj;
struct radeon_fbdev {
struct drm_fb_helper helper;
struct radeon_framebuffer rfb;
struct list_head fbdev_list;
struct radeon_device *rdev;
};
 
display_t *os_display;
struct radeon_fbdev *kos_rfbdev;
 
 
static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor);
static void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y);
 
90,12 → 95,12
cursor_t *old;
uint32_t gpu_addr;
 
rdev = (struct radeon_device *)os_display->ddev->dev_private;
radeon_crtc = to_radeon_crtc(os_display->crtc);
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
radeon_crtc = to_radeon_crtc(rdisplay->crtc);
 
old = os_display->cursor;
old = rdisplay->cursor;
 
os_display->cursor = cursor;
rdisplay->cursor = cursor;
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
 
if (ASIC_IS_DCE4(rdev)) {
120,8 → 125,8
void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y)
{
struct radeon_device *rdev;
rdev = (struct radeon_device *)os_display->ddev->dev_private;
struct drm_crtc *crtc = os_display->crtc;
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
struct drm_crtc *crtc = rdisplay->crtc;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 
int hot_x = cursor->hot_x;
192,22 → 197,27
return name;
}
 
static int set_mode(struct drm_device *dev, struct drm_connector *connector,
struct drm_crtc *crtc, videomode_t *reqmode, bool strict)
bool set_mode(struct drm_device *dev, struct drm_connector *connector,
videomode_t *reqmode, bool strict)
{
struct drm_display_mode *mode = NULL, *tmpmode;
struct drm_framebuffer *fb = NULL;
struct drm_mode_set set;
const char *con_name;
unsigned hdisplay, vdisplay;
int ret;
 
drm_modeset_lock_all(dev);
struct drm_fb_helper *fb_helper;
 
fb_helper = &kos_rfbdev->helper;
 
 
bool ret = false;
 
ENTER();
 
dbgprintf("width %d height %d vrefresh %d\n",
reqmode->width, reqmode->height, reqmode->freq);
 
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (tmpmode->hdisplay == reqmode->width) &&
(tmpmode->vdisplay == reqmode->height) &&
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) &&
(drm_mode_vrefresh(tmpmode) == reqmode->freq) )
{
mode = tmpmode;
219,8 → 229,8
{
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (tmpmode->hdisplay == reqmode->width) &&
(tmpmode->vdisplay == reqmode->height) )
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) )
{
mode = tmpmode;
goto do_set;
228,73 → 238,77
};
};
 
DRM_ERROR("%s failed\n", __FUNCTION__);
do_set:
 
return -1;
if( mode != NULL )
{
struct drm_framebuffer *fb;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
do_set:
// char con_edid[128];
char *con_name;
char *enc_name;
 
con_name = connector->name;
encoder = connector->encoder;
crtc = encoder->crtc;
 
DRM_DEBUG_KMS("set mode %d %d: crtc %d connector %s\n",
reqmode->width, reqmode->height, crtc->base.id,
con_name);
// fb = list_first_entry(&dev->mode_config.fb_kernel_list,
// struct drm_framebuffer, filp_head);
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
// memcpy(con_edid, connector->edid_blob_ptr->data, 128);
 
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
// dbgprintf("Manufacturer: %s Model %x Serial Number %u\n",
// manufacturer_name(con_edid + 0x08),
// (unsigned short)(con_edid[0x0A] + (con_edid[0x0B] << 8)),
// (unsigned int)(con_edid[0x0C] + (con_edid[0x0D] << 8)
// + (con_edid[0x0E] << 16) + (con_edid[0x0F] << 24)));
 
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
con_name = drm_get_connector_name(connector);
enc_name = drm_get_encoder_name(encoder);
 
fb = main_fb;
dbgprintf("set mode %d %d connector %s encoder %s\n",
reqmode->width, reqmode->height, con_name, enc_name);
 
fb = fb_helper->fb;
 
fb->width = reqmode->width;
fb->height = reqmode->height;
 
fb->pitches[0] =
fb->pitches[1] =
fb->pitches[2] =
fb->pitches[0] = fb->pitches[1] = fb->pitches[2] =
fb->pitches[3] = radeon_align_pitch(dev->dev_private, reqmode->width, 32, false) * ((32 + 1) / 8);
fb->bits_per_pixel = 32;
fb->depth = 24;
 
crtc->fb = fb;
crtc->enabled = true;
os_display->crtc = crtc;
rdisplay->crtc = crtc;
 
set.crtc = crtc;
set.x = 0;
set.y = 0;
set.mode = mode;
set.connectors = &connector;
set.num_connectors = 1;
set.fb = fb;
ret = drm_crtc_helper_set_mode(crtc, mode, 0, 0, fb);
 
ret = drm_mode_set_config_internal(&set);
 
drm_modeset_unlock_all(dev);
 
select_cursor_kms(os_display->cursor);
select_cursor_kms(rdisplay->cursor);
radeon_show_cursor_kms(crtc);
 
if ( !ret )
if (ret == true)
{
os_display->width = fb->width;
os_display->height = fb->height;
os_display->vrefresh = drm_mode_vrefresh(mode);
rdisplay->width = fb->width;
rdisplay->height = fb->height;
rdisplay->pitch = fb->pitches[0];
rdisplay->vrefresh = drm_mode_vrefresh(mode);
 
sysSetScreen(fb->width, fb->height, fb->pitches[0]);
 
DRM_DEBUG_KMS("new mode %d x %d pitch %d\n",
dbgprintf("new mode %d x %d pitch %d\n",
fb->width, fb->height, fb->pitches[0]);
}
else
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
fb->width, fb->height, crtc);
}
 
LEAVE();
return ret;
}
};
 
static int count_connector_modes(struct drm_connector* connector)
{
308,125 → 322,109
return count;
};
 
static struct drm_crtc *get_possible_crtc(struct drm_device *dev, struct drm_encoder *encoder)
static struct drm_connector* get_def_connector(struct drm_device *dev)
{
struct drm_crtc *tmp_crtc;
int crtc_mask = 1;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
 
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head)
{
if (encoder->possible_crtcs & crtc_mask)
{
encoder->crtc = tmp_crtc;
DRM_DEBUG_KMS("use CRTC %p ID %d\n", tmp_crtc, tmp_crtc->base.id);
return tmp_crtc;
};
crtc_mask <<= 1;
};
return NULL;
};
struct drm_connector *def_connector = NULL;
 
static int choose_config(struct drm_device *dev, struct drm_connector **boot_connector,
struct drm_crtc **boot_crtc)
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
struct drm_connector_helper_funcs *connector_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
if( connector->status != connector_status_connected)
continue;
 
encoder = connector->encoder;
 
if(encoder == NULL)
{
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
 
if( encoder == NULL)
{
DRM_DEBUG_KMS("CONNECTOR %x ID: %d no active encoders\n",
connector, connector->base.id);
continue;
};
}
 
connector->encoder = encoder;
 
crtc = encoder->crtc;
if(crtc == NULL)
crtc = get_possible_crtc(dev, encoder);
 
if(crtc != NULL)
{
*boot_connector = connector;
*boot_crtc = crtc;
connector->encoder = encoder;
DRM_DEBUG_KMS("CONNECTOR %p ID:%d status:%d ENCODER %p ID: %d CRTC %p ID:%d\n",
connector, connector->base.id, connector->status,
encoder, encoder->base.id, crtc, crtc->base.id );
return 0;
}
else
DRM_DEBUG_KMS("No CRTC for encoder %d\n", encoder->base.id);
dbgprintf("CONNECTOR %x ID: %d status %d encoder %x\n crtc %x",
connector, connector->base.id,
connector->status, connector->encoder,
crtc);
 
// if (crtc == NULL)
// continue;
 
def_connector = connector;
 
break;
};
 
return -ENOENT;
return def_connector;
};
 
static int get_boot_mode(struct drm_connector *connector, videomode_t *usermode)
{
struct drm_display_mode *mode;
 
list_for_each_entry(mode, &connector->modes, head)
{
DRM_DEBUG_KMS("check mode w:%d h:%d %dHz\n",
mode->hdisplay, mode->vdisplay,
drm_mode_vrefresh(mode));
 
if( os_display->width == mode->hdisplay &&
os_display->height == mode->vdisplay &&
drm_mode_vrefresh(mode) == 60)
bool init_display_kms(struct radeon_device *rdev, videomode_t *usermode)
{
usermode->width = os_display->width;
usermode->height = os_display->height;
usermode->freq = 60;
return 1;
}
}
return 0;
}
struct drm_device *dev;
 
int init_display_kms(struct drm_device *dev, videomode_t *usermode)
{
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_connector *connector = NULL;
struct drm_encoder *encoder;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb;
struct drm_display_mode *native;
 
 
cursor_t *cursor;
bool retval = false;
u32_t ifl;
int ret;
 
mutex_lock(&dev->mode_config.mutex);
struct radeon_fbdev *rfbdev;
struct drm_fb_helper *fb_helper;
 
ret = choose_config(dev, &connector, &crtc);
if(ret)
int i;
 
ENTER();
 
dev = rdev->ddev;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
DRM_DEBUG_KMS("No active connectors!\n");
mutex_unlock(&dev->mode_config.mutex);
if( connector->status != connector_status_connected)
continue;
 
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if( encoder == NULL)
{
dbgprintf("CONNECTOR %x ID: %d no active encoders\n",
connector, connector->base.id);
continue;
}
connector->encoder = encoder;
 
dbgprintf("CONNECTOR %x ID: %d status %d encoder %x\n crtc %x\n",
connector, connector->base.id,
connector->status, connector->encoder,
encoder->crtc);
 
crtc = encoder->crtc;
break;
};
 
if(connector == NULL)
{
dbgprintf("No active connectors!\n");
return -1;
};
 
{
struct drm_display_mode *tmp, *native = NULL;
struct radeon_device *rdev = dev->dev_private;
struct drm_display_mode *tmp;
 
list_for_each_entry(tmp, &connector->modes, head) {
if (tmp->hdisplay > 16384 ||
tmp->vdisplay > 16384)
if (drm_mode_width(tmp) > 16384 ||
drm_mode_height(tmp) > 16384)
continue;
if (tmp->type & DRM_MODE_TYPE_PREFERRED)
{
434,97 → 432,122
break;
};
}
}
 
if( ASIC_IS_AVIVO(rdev) && native )
{
dbgprintf("native w %d h %d\n", native->hdisplay, native->vdisplay);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(connector->encoder);
radeon_encoder->rmx_type = RMX_FULL;
radeon_encoder->native_mode = *native;
};
}
 
#if 0
mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
 
if (!main_fb_obj->name) {
ret = idr_alloc(&dev->object_name_idr, &main_fb_obj, 1, 0, GFP_NOWAIT);
if(crtc == NULL)
{
struct drm_crtc *tmp_crtc;
int crtc_mask = 1;
 
main_fb_obj->name = ret;
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head)
{
if (encoder->possible_crtcs & crtc_mask)
{
crtc = tmp_crtc;
encoder->crtc = crtc;
break;
};
crtc_mask <<= 1;
};
};
 
/* Allocate a reference for the name table. */
drm_gem_object_reference(main_fb_obj);
if(crtc == NULL)
{
dbgprintf("No CRTC for encoder %d\n", encoder->base.id);
return -1;
};
 
DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, main_fb_obj->name );
}
 
idr_preload_end();
mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference(main_fb_obj);
#endif
dbgprintf("[Select CRTC:%d]\n", crtc->base.id);
 
os_display = GetDisplay();
os_display->ddev = dev;
os_display->connector = connector;
os_display->crtc = crtc;
 
os_display->supported_modes = count_connector_modes(connector);
// drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 
rdisplay = GetDisplay();
rdisplay->ddev = dev;
rdisplay->connector = connector;
rdisplay->crtc = crtc;
 
rdisplay->supported_modes = count_connector_modes(connector);
 
 
 
ifl = safe_cli();
{
list_for_each_entry(cursor, &os_display->cursors, list)
list_for_each_entry(cursor, &rdisplay->cursors, list)
{
init_cursor(cursor);
};
 
os_display->restore_cursor(0,0);
os_display->init_cursor = init_cursor;
os_display->select_cursor = select_cursor_kms;
os_display->show_cursor = NULL;
os_display->move_cursor = move_cursor_kms;
os_display->restore_cursor = restore_cursor;
os_display->disable_mouse = disable_mouse;
select_cursor_kms(os_display->cursor);
};
safe_sti(ifl);
 
 
// dbgprintf("current mode %d x %d x %d\n",
// os_display->width, os_display->height, os_display->vrefresh);
// dbgprintf("user mode mode %d x %d x %d\n",
// usermode->width, usermode->height, usermode->freq);
dbgprintf("current mode %d x %d x %d\n",
rdisplay->width, rdisplay->height, rdisplay->vrefresh);
dbgprintf("user mode mode %d x %d x %d\n",
usermode->width, usermode->height, usermode->freq);
 
if( (usermode->width == 0) ||
(usermode->height == 0))
 
if( (usermode->width != 0) &&
(usermode->height != 0) &&
( (usermode->width != rdisplay->width) ||
(usermode->height != rdisplay->height) ||
(usermode->freq != rdisplay->vrefresh) ) )
{
if( !get_boot_mode(connector, usermode))
 
retval = set_mode(dev, rdisplay->connector, usermode, false);
}
else
{
struct drm_display_mode *mode;
usermode->width = rdisplay->width;
usermode->height = rdisplay->height;
usermode->freq = 60;
retval = set_mode(dev, rdisplay->connector, usermode, false);
};
 
mode = list_entry(connector->modes.next, typeof(*mode), head);
usermode->width = mode->hdisplay;
usermode->height = mode->vdisplay;
usermode->freq = drm_mode_vrefresh(mode);
ifl = safe_cli();
{
rdisplay->restore_cursor(0,0);
rdisplay->init_cursor = init_cursor;
rdisplay->select_cursor = select_cursor_kms;
rdisplay->show_cursor = NULL;
rdisplay->move_cursor = move_cursor_kms;
rdisplay->restore_cursor = restore_cursor;
rdisplay->disable_mouse = disable_mouse;
 
select_cursor_kms(rdisplay->cursor);
radeon_show_cursor_kms(rdisplay->crtc);
};
};
safe_sti(ifl);
 
mutex_unlock(&dev->mode_config.mutex);
// init_bitmaps();
 
set_mode(dev, os_display->connector, os_display->crtc, usermode, false);
LEAVE();
 
radeon_show_cursor_kms(os_display->crtc);
 
return 0;
return retval;
};
 
 
int get_videomodes(videomode_t *mode, int *count)
int get_modes(videomode_t *mode, int *count)
{
int err = -1;
 
// ENTER();
 
dbgprintf("mode %x count %d\n", mode, *count);
 
if( *count == 0 )
{
*count = os_display->supported_modes;
*count = rdisplay->supported_modes;
err = 0;
}
else if( mode != NULL )
532,15 → 555,15
struct drm_display_mode *drmmode;
int i = 0;
 
if( *count > os_display->supported_modes)
*count = os_display->supported_modes;
if( *count > rdisplay->supported_modes)
*count = rdisplay->supported_modes;
 
list_for_each_entry(drmmode, &os_display->connector->modes, head)
list_for_each_entry(drmmode, &rdisplay->connector->modes, head)
{
if( i < *count)
{
mode->width = drmmode->hdisplay;
mode->height = drmmode->vdisplay;
mode->width = drm_mode_width(drmmode);
mode->height = drm_mode_height(drmmode);
mode->bpp = 32;
mode->freq = drm_mode_vrefresh(drmmode);
i++;
551,30 → 574,109
*count = i;
err = 0;
};
// LEAVE();
return err;
};
}
 
int set_user_mode(videomode_t *mode)
{
int err = -1;
 
// ENTER();
 
dbgprintf("width %d height %d vrefresh %d\n",
mode->width, mode->height, mode->freq);
 
if( (mode->width != 0) &&
(mode->height != 0) &&
(mode->freq != 0 ) &&
( (mode->width != os_display->width) ||
(mode->height != os_display->height) ||
(mode->freq != os_display->vrefresh) ) )
( (mode->width != rdisplay->width) ||
(mode->height != rdisplay->height) ||
(mode->freq != rdisplay->vrefresh) ) )
{
return set_mode(os_display->ddev, os_display->connector, os_display->crtc, mode, true);
if( set_mode(rdisplay->ddev, rdisplay->connector, mode, true) )
err = 0;
};
 
return -1;
// LEAVE();
return err;
};
 
 
int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
int height = mode_cmd->height;
u32 bpp, depth;
 
static struct radeon_bo kos_bo;
static struct drm_mm_node vm_node;
 
 
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
fb_tiled) * ((bpp + 1) / 8);
 
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
 
#ifndef __TTM__
ret = drm_gem_object_init(rdev->ddev, &kos_bo.gem_base, aligned_size);
if (unlikely(ret)) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
return -ENOMEM;
}
#endif
 
kos_bo.rdev = rdev;
kos_bo.gem_base.driver_private = NULL;
kos_bo.surface_reg = -1;
// kos_bo.domain = RADEON_GEM_DOMAIN_VRAM;
 
INIT_LIST_HEAD(&kos_bo.list);
 
gobj = &kos_bo.gem_base;
rbo = gem_to_radeon_bo(gobj);
 
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
 
// if (tiling_flags) {
// ret = radeon_bo_set_tiling_flags(rbo,
// tiling_flags | RADEON_TILING_SURFACE,
// mode_cmd->pitches[0]);
// if (ret)
// dev_err(rdev->dev, "FB failed to set tiling flags\n");
// }
 
vm_node.size = 0xC00000 >> 12;
vm_node.start = 0;
vm_node.mm = NULL;
 
rbo->tbo.vm_node = &vm_node;
rbo->tbo.offset = rbo->tbo.vm_node->start << PAGE_SHIFT;
rbo->tbo.offset += (u64)rbo->rdev->mc.vram_start;
rbo->kptr = (void*)0xFE000000;
rbo->pin_count = 1;
 
 
*gobj_p = gobj;
return 0;
}
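/* Editor's worked example for the sizing above (illustrative numbers):
 * at 1024x768, 32 bpp, (bpp + 1) / 8 == 4 bytes per pixel; if
 * radeon_align_pitch() leaves the 1024-pixel width unchanged, the pitch
 * is 4096 bytes, the height rounds to ALIGN(768, 8) == 768 lines on
 * R600+, and the buffer needs ALIGN(4096 * 768, PAGE_SIZE) == 3 MiB. */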
 
#if 0
typedef struct
{
585,7 → 687,7
}rect_t;
 
extern struct hmm bm_mm;
struct drm_device *main_device;
struct drm_device *main_drm_device;
 
void FASTCALL GetWindowRect(rect_t *rc)__asm__("GetWindowRect");
 
595,7 → 697,7
{
u32_t addr;
 
addr = (u32_t)os_display;
addr = (u32_t)rdisplay;
addr+= sizeof(display_t); /* shoot me */
return *(u32_t*)addr;
}
690,7 → 792,7
src_offset = (u8*)(src_y*bitmap->pitch + src_x*4);
src_offset += (u32)bitmap->uaddr;
 
dst_offset = (u8*)(dst_y*os_display->width + dst_x);
dst_offset = (u8*)(dst_y*rdisplay->width + dst_x);
dst_offset+= get_display_map();
 
u32_t tmp_h = height;
704,7 → 806,7
u8* tmp_dst = dst_offset;
 
src_offset+= bitmap->pitch;
dst_offset+= os_display->width;
dst_offset+= rdisplay->width;
 
while( tmp_w--)
{
764,7 → 866,7
RADEON_GMC_WR_MSK_DIS;
 
ib->ptr[4] = ((bitmap->pitch/64) << 22) | (bitmap->gaddr >> 10);
ib->ptr[5] = ((os_display->pitch/64) << 22) | (rdev->mc.vram_start >> 10);
ib->ptr[5] = ((rdisplay->pitch/64) << 22) | (rdev->mc.vram_start >> 10);
ib->ptr[6] = (0x1fff) | (0x1fff << 16);
ib->ptr[7] = 0;
ib->ptr[8] = (0x1fff) | (0x1fff << 16);
/drivers/video/drm/radeon/reg_srcs/cayman
21,7 → 21,7
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x000089B0 VGT_HS_OFFCHIP_PARAM
0x00008A14 PA_CL_ENHANCE
0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008A60 PA_SC_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
532,7 → 532,7
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
0x00028B90 VGT_GS_INSTANCE_CNT
0x00028B74 VGT_GS_INSTANCE_CNT
0x00028BD4 PA_SC_CENTROID_PRIORITY_0
0x00028BD8 PA_SC_CENTROID_PRIORITY_1
0x00028BDC PA_SC_LINE_CNTL
/drivers/video/drm/radeon/reg_srcs/evergreen
22,7 → 22,7
0x000089A4 VGT_COMPUTE_START_Z
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x00008A14 PA_CL_ENHANCE
0x00008A60 PA_SU_LINE_STIPPLE_VALUE
0x00008A60 PA_SC_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
545,7 → 545,7
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
0x00028B90 VGT_GS_INSTANCE_CNT
0x00028B74 VGT_GS_INSTANCE_CNT
0x00028C00 PA_SC_LINE_CNTL
0x00028C08 PA_SU_VTX_CNTL
0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
/drivers/video/drm/radeon/reg_srcs/r600
18,7 → 18,6
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A6C VGT_GS_OUT_PRIM_TYPE
0x00028B38 VGT_GS_MAX_VERT_OUT
0x000088C8 VGT_GS_PER_ES
0x000088E8 VGT_GS_PER_VS
0x000088D4 VGT_GS_VERTEX_REUSE
/drivers/video/drm/radeon/rs400.c
109,6 → 109,7
uint32_t size_reg;
uint32_t tmp;
 
radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
173,13 → 174,10
/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
* AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
tmp = RREG32_MC(RS480_MC_MISC_CNTL);
tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
WREG32_MC(RS480_MC_MISC_CNTL, tmp);
WREG32_MC(RS480_MC_MISC_CNTL,
(RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
} else {
tmp = RREG32_MC(RS480_MC_MISC_CNTL);
tmp |= RS480_GART_INDEX_REG_EN;
WREG32_MC(RS480_MC_MISC_CNTL, tmp);
WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
}
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
208,26 → 206,24
radeon_gart_table_ram_free(rdev);
}
 
#define RS400_PTE_UNSNOOPED (1 << 0)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
 
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
 
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
if (flags & RADEON_GART_PAGE_READ)
addr |= RS400_PTE_READABLE;
if (flags & RADEON_GART_PAGE_WRITE)
addr |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
gtt[i] = entry;
return 0;
}
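/* Editor's worked example for the encoding above: addr = 0x1_2345_6000
 * keeps its page-aligned low 32 bits (0x23456000) and packs address bits
 * 39:32 (here 0x01) into entry bits 7:4, giving 0x23456010 before the
 * PTE flag bits are ORed in; the entry is then stored little-endian. */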
 
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
275,26 → 271,19
 
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
unsigned long flags;
uint32_t r;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, reg & 0xff);
r = RREG32(RS480_NB_MC_DATA);
WREG32(RS480_NB_MC_INDEX, 0xff);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
 
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
WREG32(RS480_NB_MC_DATA, (v));
WREG32(RS480_NB_MC_INDEX, 0xff);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
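/* Editor's note on the pattern above: the MC block sits behind an
 * index/data register pair, so each access programs RS480_NB_MC_INDEX,
 * touches RS480_NB_MC_DATA, then parks the index at 0xff. The rev-5078
 * side wraps the sequence in mc_idx_lock so concurrent accesses cannot
 * interleave their index writes. Illustrative call:
 *
 * u32 scratch = rs400_mc_rreg(rdev, RS690_AIC_CTRL_SCRATCH);
 */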
 
#if defined(CONFIG_DEBUG_FS)
509,9 → 498,6
return r;
r300_set_reg_safe(rdev);
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
/drivers/video/drm/radeon/rs600.c
106,202 → 106,13
if (!avivo_is_counter_moving(rdev, crtc))
break;
}
}
}
void avivo_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 0;
u32 tmp = 0;
enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
bpc = radeon_get_monitor_bpc(connector);
dither = radeon_connector->dither;
}
 
/* LVDS FMT is set up by atom */
if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
return;
 
if (bpc == 0)
return;
 
switch (bpc) {
case 6:
if (dither == RADEON_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
else
tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
break;
case 8:
if (dither == RADEON_FMT_DITHER_ENABLE)
/* XXX sort out optimal dither settings */
tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
else
tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
break;
case 10:
default:
/* not needed */
break;
udelay(1);
}
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
break;
default:
break;
}
}
 
void rs600_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
 
if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
tmp = RREG32(voltage->gpio.reg);
if (voltage->active_high)
tmp |= voltage->gpio.mask;
else
tmp &= ~(voltage->gpio.mask);
WREG32(voltage->gpio.reg, tmp);
if (voltage->delay)
udelay(voltage->delay);
} else {
tmp = RREG32(voltage->gpio.reg);
if (voltage->active_high)
tmp &= ~voltage->gpio.mask;
else
tmp |= voltage->gpio.mask;
WREG32(voltage->gpio.reg, tmp);
if (voltage->delay)
udelay(voltage->delay);
}
} else if (voltage->type == VOLTAGE_VDDC)
radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
 
dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
}
} else {
dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
}
WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
 
dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
if (voltage->delay) {
dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
} else
dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
} else
dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
 
hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
hdp_dyn_cntl &= ~HDP_FORCEON;
else
hdp_dyn_cntl |= HDP_FORCEON;
WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
#if 0
/* mc_host_dyn seems to cause hangs from time to time */
mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
else
mc_host_dyn_cntl |= MC_HOST_FORCEON;
WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
#endif
dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
else
dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
 
/* set pcie lanes */
if ((rdev->flags & RADEON_IS_PCIE) &&
!(rdev->flags & RADEON_IS_IGP) &&
rdev->asic->pm.set_pcie_lanes &&
(ps->pcie_lanes !=
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
ps->pcie_lanes);
DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
}
}
 
void rs600_pm_prepare(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
 
/* disable any active CRTCs */
list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (radeon_crtc->enabled) {
tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
}
}
}
 
void rs600_pm_finish(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 tmp;
 
/* enable any active CRTCs */
list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (radeon_crtc->enabled) {
tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
}
}
}
 
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
516,6 → 327,7
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
586,22 → 398,24
radeon_gart_table_vram_free(rdev);
}
 
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
#define R600_PTE_VALID (1 << 0)
#define R600_PTE_SYSTEM (1 << 1)
#define R600_PTE_SNOOPED (1 << 2)
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
 
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_SYSTEM;
if (flags & RADEON_GART_PAGE_VALID)
addr |= R600_PTE_VALID;
if (flags & RADEON_GART_PAGE_READ)
addr |= R600_PTE_READABLE;
if (flags & RADEON_GART_PAGE_WRITE)
addr |= R600_PTE_WRITEABLE;
if (flags & RADEON_GART_PAGE_SNOOP)
addr |= R600_PTE_SNOOPED;
addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
writeq(addr, ptr + (i * 8));
return 0;
}
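/* Editor's worked example for the 64-bit PTEs above: each slot is eight
 * bytes into the table, hence ptr + (i * 8) and writeq(). For
 * addr = 0x1_2345_6789 the mask keeps the 4K page (0x1_2345_6000); the
 * rev-5077 side always ORs in VALID|SYSTEM|SNOOPED|READABLE|WRITEABLE
 * (0x67), while rev 5078 derives the same bits from the RADEON_GART_PAGE_*
 * flags, so unsnooped or read-only mappings become possible. */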
 
int rs600_irq_set(struct radeon_device *rdev)
863,26 → 677,16
 
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1));
r = RREG32(R_000074_MC_IND_DATA);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
return RREG32(R_000074_MC_IND_DATA);
}
 
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
WREG32(R_000074_MC_IND_DATA, v);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
 
static void rs600_debugfs(struct radeon_device *rdev)
970,11 → 774,6
return r;
}
 
r = r600_audio_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
 
return 0;
}
1035,9 → 834,6
return r;
rs600_set_safe_registers(rdev);
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
/drivers/video/drm/radeon/rs690.c
162,16 → 162,6
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
/* Some boards seem to be configured for 128MB of sideport memory,
* but really only have 64MB. Just skip the sideport and use
* UMA memory.
*/
if (rdev->mc.igp_sideport_enabled &&
(rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
base += 128 * 1024 * 1024;
rdev->mc.real_vram_size -= 128 * 1024 * 1024;
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
 
/* Use K8 direct mapping for fast fb access. */
rdev->fastfb_working = false;
259,15 → 249,12
 
static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
struct radeon_crtc *crtc,
struct rs690_watermark *wm,
bool low)
struct rs690_watermark *wm)
{
struct drm_display_mode *mode = &crtc->base.mode;
fixed20_12 a, b, c;
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
fixed20_12 sclk, core_bandwidth, max_bandwidth;
u32 selected_sclk;
 
if (!crtc->base.enabled) {
/* FIXME: wouldn't it be better to set priority mark to maximum */
275,21 → 262,6
return;
}
 
if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) &&
(rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
selected_sclk = radeon_dpm_get_sclk(rdev, low);
else
selected_sclk = rdev->pm.current_sclk;
 
/* sclk in MHz */
a.full = dfixed_const(100);
sclk.full = dfixed_const(selected_sclk);
sclk.full = dfixed_div(sclk, a);
 
/* core_bandwidth = sclk(Mhz) * 16 */
a.full = dfixed_const(16);
core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
 
if (crtc->vsc.full > dfixed_const(2))
wm->num_line_pair.full = dfixed_const(2);
else
350,31 → 322,29
wm->active_time.full = dfixed_div(wm->active_time, a);
 
/* Maximum bandwidth is the minimum bandwidth of all components */
max_bandwidth = core_bandwidth;
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
if (rdev->mc.igp_sideport_enabled) {
if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
max_bandwidth = rdev->pm.sideport_bandwidth;
read_delay_latency.full = dfixed_const(370 * 800);
a.full = dfixed_const(1000);
b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
read_delay_latency.full = dfixed_div(read_delay_latency, b);
read_delay_latency.full = dfixed_mul(read_delay_latency, a);
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
read_delay_latency.full = dfixed_const(370 * 800 * 1000);
read_delay_latency.full = dfixed_div(read_delay_latency,
rdev->pm.igp_sideport_mclk);
} else {
if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
rdev->pm.k8_bandwidth.full)
max_bandwidth = rdev->pm.k8_bandwidth;
if (max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
rdev->pm.ht_bandwidth.full)
max_bandwidth = rdev->pm.ht_bandwidth;
rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
read_delay_latency.full = dfixed_const(5000);
}
 
/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
a.full = dfixed_const(16);
sclk.full = dfixed_mul(max_bandwidth, a);
rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
a.full = dfixed_const(1000);
sclk.full = dfixed_div(a, sclk);
rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB, which consists of pipeline delay and inter-chunk gap
381,7 → 351,7
* sclk = system clock(ns)
*/
a.full = dfixed_const(256 * 13);
chunk_time.full = dfixed_mul(sclk, a);
chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
a.full = dfixed_const(10);
chunk_time.full = dfixed_div(chunk_time, a);
 
445,217 → 415,193
}
}
 
static void rs690_compute_mode_priority(struct radeon_device *rdev,
struct rs690_watermark *wm0,
struct rs690_watermark *wm1,
struct drm_display_mode *mode0,
struct drm_display_mode *mode1,
u32 *d1mode_priority_a_cnt,
u32 *d2mode_priority_a_cnt)
void rs690_bandwidth_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
struct rs690_watermark wm0;
struct rs690_watermark wm1;
u32 tmp;
u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
 
*d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
*d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
mode1 = &rdev->mode_info.crtcs[1]->base.mode;
/*
* Set display0/1 priority up in the memory controller for
* modes if the user specifies HIGH for displaypriority
* option.
*/
if ((rdev->disp_priority == 2) &&
((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
tmp &= C_000104_MC_DISP0R_INIT_LAT;
tmp &= C_000104_MC_DISP1R_INIT_LAT;
if (mode0)
tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
if (mode1)
tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
}
rs690_line_buffer_adjust(rdev, mode0, mode1);
 
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
WREG32(R_006C9C_DCP_CONTROL, 0);
if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
WREG32(R_006C9C_DCP_CONTROL, 2);
 
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
 
tmp = (wm0.lb_request_fifo_depth - 1);
tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
 
if (mode0 && mode1) {
if (dfixed_trunc(wm0->dbpp) > 64)
a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0->num_line_pair.full;
if (dfixed_trunc(wm1->dbpp) > 64)
b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
a.full = wm0.num_line_pair.full;
if (dfixed_trunc(wm1.dbpp) > 64)
b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1->num_line_pair.full;
b.full = wm1.num_line_pair.full;
a.full += b.full;
fill_rate.full = dfixed_div(wm0->sclk, a);
if (wm0->consumption_rate.full > fill_rate.full) {
b.full = wm0->consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm0->active_time);
a.full = dfixed_mul(wm0->worst_case_latency,
wm0->consumption_rate);
fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
} else {
a.full = dfixed_mul(wm0->worst_case_latency,
wm0->consumption_rate);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
}
if (wm1->consumption_rate.full > fill_rate.full) {
b.full = wm1->consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm1->active_time);
a.full = dfixed_mul(wm1->worst_case_latency,
wm1->consumption_rate);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
} else {
a.full = dfixed_mul(wm1->worst_case_latency,
wm1->consumption_rate);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
*d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
}
} else if (mode0) {
if (dfixed_trunc(wm0->dbpp) > 64)
a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0->num_line_pair.full;
fill_rate.full = dfixed_div(wm0->sclk, a);
if (wm0->consumption_rate.full > fill_rate.full) {
b.full = wm0->consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm0->active_time);
a.full = dfixed_mul(wm0->worst_case_latency,
wm0->consumption_rate);
a.full = wm0.num_line_pair.full;
fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
a.full = a.full + b.full;
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
} else {
a.full = dfixed_mul(wm0->worst_case_latency,
wm0->consumption_rate);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
*d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
} else if (mode1) {
if (dfixed_trunc(wm1->dbpp) > 64)
a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
if (dfixed_trunc(wm1.dbpp) > 64)
a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1->num_line_pair.full;
fill_rate.full = dfixed_div(wm1->sclk, a);
if (wm1->consumption_rate.full > fill_rate.full) {
b.full = wm1->consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm1->active_time);
a.full = dfixed_mul(wm1->worst_case_latency,
wm1->consumption_rate);
a.full = wm1.num_line_pair.full;
fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
a.full = a.full + b.full;
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
} else {
a.full = dfixed_mul(wm1->worst_case_latency,
wm1->consumption_rate);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
}
}
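/* Editor's note on the fixed20_12 math used throughout these watermark
 * routines: values carry 12 fraction bits, so dfixed_const(3) == 3 << 12
 * == 12288, dfixed_mul(a, b) is essentially (a.full * b.full) >> 12 (with
 * rounding), and dfixed_trunc(a) == a.full >> 12. Worked example:
 * 2.5 * 3.0 == (10240 * 12288) >> 12 == 30720, i.e. 7.5 in 20.12;
 * dfixed_trunc() then drops the fraction to yield the integer priority
 * mark written to the register. */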
 
void rs690_bandwidth_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
struct rs690_watermark wm0_high, wm0_low;
struct rs690_watermark wm1_high, wm1_low;
u32 tmp;
u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
 
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
mode1 = &rdev->mode_info.crtcs[1]->base.mode;
/*
* Set display0/1 priority up in the memory controller for
* modes if the user specifies HIGH for displaypriority
* option.
*/
if ((rdev->disp_priority == 2) &&
((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
tmp &= C_000104_MC_DISP0R_INIT_LAT;
tmp &= C_000104_MC_DISP1R_INIT_LAT;
if (mode0)
tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
if (mode1)
tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
}
rs690_line_buffer_adjust(rdev, mode0, mode1);
 
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
WREG32(R_006C9C_DCP_CONTROL, 0);
if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
WREG32(R_006C9C_DCP_CONTROL, 2);
 
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
 
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);
 
tmp = (wm0_high.lb_request_fifo_depth - 1);
tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16;
WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
 
rs690_compute_mode_priority(rdev,
&wm0_high, &wm1_high,
mode0, mode1,
&d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
rs690_compute_mode_priority(rdev,
&wm0_low, &wm1_low,
mode0, mode1,
&d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
 
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
 
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
unsigned long flags;
uint32_t r;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
r = RREG32(R_00007C_MC_DATA);
WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
 
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
S_000078_MC_IND_WR_EN(1));
WREG32(R_00007C_MC_DATA, v);
WREG32(R_000078_MC_INDEX, 0x7F);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
 
static void rs690_mc_program(struct radeon_device *rdev)
726,11 → 672,6
return r;
}
 
r = r600_audio_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
 
return 0;
}
793,9 → 734,6
return r;
rs600_set_safe_registers(rdev);
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
/drivers/video/drm/radeon/rv515.c
124,7 → 124,7
radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(ring, PACKET0(0x20C8, 0));
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
}
 
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
209,27 → 209,19
 
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
unsigned long flags;
uint32_t r;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
r = RREG32(MC_IND_DATA);
WREG32(MC_IND_INDEX, 0);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
 
return r;
}
 
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
WREG32(MC_IND_DATA, (v));
WREG32(MC_IND_INDEX, 0);
spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
 
#if defined(CONFIG_DEBUG_FS)
406,9 → 398,8
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
if ((tmp & 0x7) != 3) {
tmp &= ~0x7;
tmp |= 0x3;
if ((tmp & 0x3) != 0) {
tmp &= ~0x3;
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
631,9 → 622,6
return r;
rv515_set_safe_registers(rdev);
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
889,15 → 877,12
 
static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
struct radeon_crtc *crtc,
struct rv515_watermark *wm,
bool low)
struct rv515_watermark *wm)
{
struct drm_display_mode *mode = &crtc->base.mode;
fixed20_12 a, b, c;
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
fixed20_12 sclk;
u32 selected_sclk;
 
if (!crtc->base.enabled) {
/* FIXME: wouldn't it be better to set priority mark to maximum */
905,18 → 890,6
return;
}
 
/* rv6xx, rv7xx */
if ((rdev->family >= CHIP_RV610) &&
(rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
selected_sclk = radeon_dpm_get_sclk(rdev, low);
else
selected_sclk = rdev->pm.current_sclk;
 
/* sclk in MHz */
a.full = dfixed_const(100);
sclk.full = dfixed_const(selected_sclk);
sclk.full = dfixed_div(sclk, a);
 
if (crtc->vsc.full > dfixed_const(2))
wm->num_line_pair.full = dfixed_const(2);
else
982,7 → 955,7
* sclk = system clock (MHz)
*/
a.full = dfixed_const(600 * 1000);
chunk_time.full = dfixed_div(a, sclk);
chunk_time.full = dfixed_div(a, rdev->pm.sclk);
read_delay_latency.full = dfixed_const(1000);
 
/* Determine the worst case latency
1043,169 → 1016,152
}
}
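 
All of the bandwidth math in these watermark routines runs on the driver's fixed20_12 type: a 32-bit value carrying 20 integer and 12 fractional bits, manipulated through the dfixed_const/dfixed_mul/dfixed_div/dfixed_trunc helpers from the upstream drm_fixed.h. A small self-contained illustration of the representation (the values are arbitrary):
 
#include <drm/drm_fixed.h>  /* fixed20_12 and the dfixed_* helpers */
 
static u32 fixed20_12_demo(void)
{
	fixed20_12 a, b, c;
 
	a.full = dfixed_const(3);   /* 3.0 -> 3 << 12 = 0x3000 */
	b.full = dfixed_const(2);   /* 2.0 -> 0x2000 */
	c.full = dfixed_div(a, b);  /* 1.5 -> 0x1800 */
	c.full = dfixed_mul(c, b);  /* back to 3.0 -> 0x3000 */
	return dfixed_trunc(c);     /* drop the fraction: 3 */
}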
 
static void rv515_compute_mode_priority(struct radeon_device *rdev,
struct rv515_watermark *wm0,
struct rv515_watermark *wm1,
struct drm_display_mode *mode0,
struct drm_display_mode *mode1,
u32 *d1mode_priority_a_cnt,
u32 *d2mode_priority_a_cnt)
void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
struct rv515_watermark wm0;
struct rv515_watermark wm1;
u32 tmp;
u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
 
*d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
*d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
mode1 = &rdev->mode_info.crtcs[1]->base.mode;
rs690_line_buffer_adjust(rdev, mode0, mode1);
 
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
 
tmp = wm0.lb_request_fifo_depth;
tmp |= wm1.lb_request_fifo_depth << 16;
WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
 
if (mode0 && mode1) {
if (dfixed_trunc(wm0->dbpp) > 64)
a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0->num_line_pair.full;
if (dfixed_trunc(wm1->dbpp) > 64)
b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
a.full = wm0.num_line_pair.full;
if (dfixed_trunc(wm1.dbpp) > 64)
b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
b.full = wm1->num_line_pair.full;
b.full = wm1.num_line_pair.full;
a.full += b.full;
fill_rate.full = dfixed_div(wm0->sclk, a);
if (wm0->consumption_rate.full > fill_rate.full) {
b.full = wm0->consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm0->active_time);
fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
a.full = dfixed_mul(wm0->worst_case_latency,
wm0->consumption_rate);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
a.full = dfixed_mul(wm0->worst_case_latency,
wm0->consumption_rate);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
}
if (wm1->consumption_rate.full > fill_rate.full) {
b.full = wm1->consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm1->active_time);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
a.full = dfixed_mul(wm1->worst_case_latency,
wm1->consumption_rate);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
a.full = dfixed_mul(wm1->worst_case_latency,
wm1->consumption_rate);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
}
} else if (mode0) {
if (dfixed_trunc(wm0->dbpp) > 64)
a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
a.full = wm0->num_line_pair.full;
fill_rate.full = dfixed_div(wm0->sclk, a);
if (wm0->consumption_rate.full > fill_rate.full) {
b.full = wm0->consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm0->active_time);
a.full = wm0.num_line_pair.full;
fill_rate.full = dfixed_div(wm0.sclk, a);
if (wm0.consumption_rate.full > fill_rate.full) {
b.full = wm0.consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
a.full = dfixed_mul(wm0->worst_case_latency,
wm0->consumption_rate);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
a.full = dfixed_mul(wm0->worst_case_latency,
wm0->consumption_rate);
a.full = dfixed_mul(wm0.worst_case_latency,
wm0.consumption_rate);
b.full = dfixed_const(16);
priority_mark02.full = dfixed_div(a, b);
}
if (wm0->priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark.full;
if (wm0->priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0->priority_mark_max.full;
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (wm0.priority_mark.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark.full;
if (dfixed_trunc(priority_mark02) < 0)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
} else if (mode1) {
if (dfixed_trunc(wm1->dbpp) > 64)
a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
if (dfixed_trunc(wm1.dbpp) > 64)
a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
a.full = wm1->num_line_pair.full;
fill_rate.full = dfixed_div(wm1->sclk, a);
if (wm1->consumption_rate.full > fill_rate.full) {
b.full = wm1->consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm1->active_time);
a.full = wm1.num_line_pair.full;
fill_rate.full = dfixed_div(wm1.sclk, a);
if (wm1.consumption_rate.full > fill_rate.full) {
b.full = wm1.consumption_rate.full - fill_rate.full;
b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
a.full = dfixed_mul(wm1->worst_case_latency,
wm1->consumption_rate);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
a.full = dfixed_mul(wm1->worst_case_latency,
wm1->consumption_rate);
a.full = dfixed_mul(wm1.worst_case_latency,
wm1.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
if (wm1->priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark.full;
if (wm1->priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1->priority_mark_max.full;
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (wm1.priority_mark.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark.full;
if (dfixed_trunc(priority_mark12) < 0)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
}
}
 
void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
struct rv515_watermark wm0_high, wm0_low;
struct rv515_watermark wm1_high, wm1_low;
u32 tmp;
u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
 
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
mode1 = &rdev->mode_info.crtcs[1]->base.mode;
rs690_line_buffer_adjust(rdev, mode0, mode1);
 
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
 
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);
 
tmp = wm0_high.lb_request_fifo_depth;
tmp |= wm1_high.lb_request_fifo_depth << 16;
WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
 
rv515_compute_mode_priority(rdev,
&wm0_high, &wm1_high,
mode0, mode1,
&d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
rv515_compute_mode_priority(rdev,
&wm0_low, &wm1_low,
mode0, mode1,
&d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
 
WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
 
void rv515_bandwidth_update(struct radeon_device *rdev)
/drivers/video/drm/radeon/rv770.c
744,10 → 744,10
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv730_golden_registers,
(const u32)ARRAY_SIZE(rv730_golden_registers));
(const u32)ARRAY_SIZE(rv770_golden_registers));
radeon_program_register_sequence(rdev,
rv730_mgcg_init,
(const u32)ARRAY_SIZE(rv730_mgcg_init));
(const u32)ARRAY_SIZE(rv770_mgcg_init));
break;
case CHIP_RV710:
radeon_program_register_sequence(rdev,
758,18 → 758,18
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv710_golden_registers,
(const u32)ARRAY_SIZE(rv710_golden_registers));
(const u32)ARRAY_SIZE(rv770_golden_registers));
radeon_program_register_sequence(rdev,
rv710_mgcg_init,
(const u32)ARRAY_SIZE(rv710_mgcg_init));
(const u32)ARRAY_SIZE(rv770_mgcg_init));
break;
case CHIP_RV740:
radeon_program_register_sequence(rdev,
rv740_golden_registers,
(const u32)ARRAY_SIZE(rv740_golden_registers));
(const u32)ARRAY_SIZE(rv770_golden_registers));
radeon_program_register_sequence(rdev,
rv740_mgcg_init,
(const u32)ARRAY_SIZE(rv740_mgcg_init));
(const u32)ARRAY_SIZE(rv770_mgcg_init));
break;
default:
break;
801,7 → 801,7
return reference_clock;
}
 
void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
835,15 → 835,9
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
}
 
bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 
/* Return current update_pending status: */
return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
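 
rv770_page_flip() arms the new surface address and rv770_page_flip_pending() reports whether the hardware has latched it yet. A caller that needs the flip to land before proceeding could poll, as sketched below; the loop mirrors the driver's usec_timeout/udelay idiom elsewhere in this file but is illustrative, not existing driver code:
 
static int rv770_wait_for_flip(struct radeon_device *rdev, int crtc_id)
{
	unsigned i;
 
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (!rv770_page_flip_pending(rdev, crtc_id))
			return 0;      /* surface update completed */
		udelay(1);
	}
	return -ETIMEDOUT;             /* still pending after the budget */
}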
 
/* get temperature in millidegrees */
900,6 → 894,7
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
1076,8 → 1071,7
*/
void r700_cp_stop(struct radeon_device *rdev)
{
if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1121,35 → 1115,7
return 0;
}
 
void rv770_set_clk_bypass_mode(struct radeon_device *rdev)
{
u32 tmp, i;
 
if (rdev->flags & RADEON_IS_IGP)
return;
 
tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
tmp &= SCLK_MUX_SEL_MASK;
tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE;
WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
 
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS)
break;
udelay(1);
}
 
tmp &= ~SCLK_MUX_UPDATE;
WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
 
tmp = RREG32(MPLL_CNTL_MODE);
if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
tmp &= ~RV730_MPLL_MCLK_SEL;
else
tmp &= ~MPLL_MCLK_SEL;
WREG32(MPLL_CNTL_MODE, tmp);
}
 
/*
* Core functions
*/
1169,6 → 1135,7
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
u32 db_debug4, tmp;
1302,10 → 1269,21
WREG32(SPI_CONFIG_CNTL, 0);
}
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
if (tmp < rdev->config.rv770.max_backends) {
rdev->config.rv770.max_backends = tmp;
}
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
tmp = rdev->config.rv770.max_simds -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
rdev->config.rv770.active_simds = tmp;
tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
if (tmp < rdev->config.rv770.max_pipes) {
rdev->config.rv770.max_pipes = tmp;
}
tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
if (tmp < rdev->config.rv770.max_simds) {
rdev->config.rv770.max_simds = tmp;
}
 
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
1325,14 → 1303,6
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
tmp = 0;
for (i = 0; i < rdev->config.rv770.max_backends; i++)
tmp |= (1 << i);
/* if all the backends are disabled, fix it up here */
if ((disabled_rb_mask & tmp) == tmp) {
for (i = 0; i < rdev->config.rv770.max_backends; i++)
disabled_rb_mask &= ~(1 << i);
}
tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
R7XX_MAX_BACKENDS, disabled_rb_mask);
1673,6 → 1643,80
return 0;
}
 
/**
* rv770_copy_dma - copy pages using the DMA engine
*
* @rdev: radeon_device pointer
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
* @fence: radeon fence object
*
* Copy GPU pages using the DMA engine (r7xx).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
int rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
{
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
 
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
 
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
if (cur_size_in_dw > 0xFFFF)
cur_size_in_dw = 0xFFFF;
size_in_dw -= cur_size_in_dw;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
radeon_ring_write(ring, src_offset & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
src_offset += cur_size_in_dw * 4;
dst_offset += cur_size_in_dw * 4;
}
 
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring);
radeon_semaphore_free(rdev, &sem, *fence);
 
return r;
}
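 
Each DMA_PACKET_COPY emitted above moves at most 0xFFFF dwords, so the packet count follows directly from the transfer size. A worked example, assuming the usual 4 KiB GPU page (RADEON_GPU_PAGE_SHIFT == 12):
 
/* num_gpu_pages = 256 (1 MiB):
 *   size_in_dw = (256 << 12) / 4              = 262144 dwords
 *   num_loops  = DIV_ROUND_UP(262144, 0xFFFF) = 5 copy packets
 *   ring space = num_loops * 5 + 8            = 33 dwords reserved
 */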
 
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
1681,13 → 1725,19
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
 
/* scratch needs to be initialized before MC */
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
rv770_mc_program(rdev);
 
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
1697,6 → 1747,12
}
 
rv770_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
1743,13 → 1799,15
 
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_PACKET2);
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
DMA_RB_RPTR, DMA_RB_WPTR,
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
 
1783,11 → 1841,6
return r;
}
 
r = r600_audio_init(rdev);
if (r) {
DRM_ERROR("radeon: audio init failed\n");
return r;
}
 
return 0;
}
1856,17 → 1909,6
if (r)
return r;
 
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
 
/* Initialize power management */
radeon_pm_init(rdev);
 
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
/drivers/video/drm/radeon/rv770d.h
62,253 → 62,6
# define UPLL_FB_DIV(x) ((x) << 0)
# define UPLL_FB_DIV_MASK 0x01FFFFFF
 
/* pm registers */
#define SMC_SRAM_ADDR 0x200
#define SMC_SRAM_AUTO_INC_DIS (1 << 16)
#define SMC_SRAM_DATA 0x204
#define SMC_IO 0x208
#define SMC_RST_N (1 << 0)
#define SMC_STOP_MODE (1 << 2)
#define SMC_CLK_EN (1 << 11)
#define SMC_MSG 0x20c
#define HOST_SMC_MSG(x) ((x) << 0)
#define HOST_SMC_MSG_MASK (0xff << 0)
#define HOST_SMC_MSG_SHIFT 0
#define HOST_SMC_RESP(x) ((x) << 8)
#define HOST_SMC_RESP_MASK (0xff << 8)
#define HOST_SMC_RESP_SHIFT 8
#define SMC_HOST_MSG(x) ((x) << 16)
#define SMC_HOST_MSG_MASK (0xff << 16)
#define SMC_HOST_MSG_SHIFT 16
#define SMC_HOST_RESP(x) ((x) << 24)
#define SMC_HOST_RESP_MASK (0xff << 24)
#define SMC_HOST_RESP_SHIFT 24
 
#define SMC_ISR_FFD8_FFDB 0x218
 
#define CG_SPLL_FUNC_CNTL 0x600
#define SPLL_RESET (1 << 0)
#define SPLL_SLEEP (1 << 1)
#define SPLL_DIVEN (1 << 2)
#define SPLL_BYPASS_EN (1 << 3)
#define SPLL_REF_DIV(x) ((x) << 4)
#define SPLL_REF_DIV_MASK (0x3f << 4)
#define SPLL_HILEN(x) ((x) << 12)
#define SPLL_HILEN_MASK (0xf << 12)
#define SPLL_LOLEN(x) ((x) << 16)
#define SPLL_LOLEN_MASK (0xf << 16)
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
#define SCLK_MUX_UPDATE (1 << 26)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
#define SPLL_DITHEN (1 << 28)
#define CG_SPLL_STATUS 0x60c
#define SPLL_CHG_STATUS (1 << 1)
 
#define SPLL_CNTL_MODE 0x610
#define SPLL_DIV_SYNC (1 << 5)
 
#define MPLL_CNTL_MODE 0x61c
# define MPLL_MCLK_SEL (1 << 11)
# define RV730_MPLL_MCLK_SEL (1 << 25)
 
#define MPLL_AD_FUNC_CNTL 0x624
#define CLKF(x) ((x) << 0)
#define CLKF_MASK (0x7f << 0)
#define CLKR(x) ((x) << 7)
#define CLKR_MASK (0x1f << 7)
#define CLKFRAC(x) ((x) << 12)
#define CLKFRAC_MASK (0x1f << 12)
#define YCLK_POST_DIV(x) ((x) << 17)
#define YCLK_POST_DIV_MASK (3 << 17)
#define IBIAS(x) ((x) << 20)
#define IBIAS_MASK (0x3ff << 20)
#define RESET (1 << 30)
#define PDNB (1 << 31)
#define MPLL_AD_FUNC_CNTL_2 0x628
#define BYPASS (1 << 19)
#define BIAS_GEN_PDNB (1 << 24)
#define RESET_EN (1 << 25)
#define VCO_MODE (1 << 29)
#define MPLL_DQ_FUNC_CNTL 0x62c
#define MPLL_DQ_FUNC_CNTL_2 0x630
 
#define GENERAL_PWRMGT 0x63c
# define GLOBAL_PWRMGT_EN (1 << 0)
# define STATIC_PM_EN (1 << 1)
# define THERMAL_PROTECTION_DIS (1 << 2)
# define THERMAL_PROTECTION_TYPE (1 << 3)
# define ENABLE_GEN2PCIE (1 << 4)
# define ENABLE_GEN2XSP (1 << 5)
# define SW_SMIO_INDEX(x) ((x) << 6)
# define SW_SMIO_INDEX_MASK (3 << 6)
# define SW_SMIO_INDEX_SHIFT 6
# define LOW_VOLT_D2_ACPI (1 << 8)
# define LOW_VOLT_D3_ACPI (1 << 9)
# define VOLT_PWRMGT_EN (1 << 10)
# define BACKBIAS_PAD_EN (1 << 18)
# define BACKBIAS_VALUE (1 << 19)
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
# define AC_DC_SW (1 << 24)
 
#define CG_TPC 0x640
#define SCLK_PWRMGT_CNTL 0x644
# define SCLK_PWRMGT_OFF (1 << 0)
# define SCLK_LOW_D1 (1 << 1)
# define FIR_RESET (1 << 4)
# define FIR_FORCE_TREND_SEL (1 << 5)
# define FIR_TREND_MODE (1 << 6)
# define DYN_GFX_CLK_OFF_EN (1 << 7)
# define GFX_CLK_FORCE_ON (1 << 8)
# define GFX_CLK_REQUEST_OFF (1 << 9)
# define GFX_CLK_FORCE_OFF (1 << 10)
# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
#define MCLK_PWRMGT_CNTL 0x648
# define DLL_SPEED(x) ((x) << 0)
# define DLL_SPEED_MASK (0x1f << 0)
# define MPLL_PWRMGT_OFF (1 << 5)
# define DLL_READY (1 << 6)
# define MC_INT_CNTL (1 << 7)
# define MRDCKA0_SLEEP (1 << 8)
# define MRDCKA1_SLEEP (1 << 9)
# define MRDCKB0_SLEEP (1 << 10)
# define MRDCKB1_SLEEP (1 << 11)
# define MRDCKC0_SLEEP (1 << 12)
# define MRDCKC1_SLEEP (1 << 13)
# define MRDCKD0_SLEEP (1 << 14)
# define MRDCKD1_SLEEP (1 << 15)
# define MRDCKA0_RESET (1 << 16)
# define MRDCKA1_RESET (1 << 17)
# define MRDCKB0_RESET (1 << 18)
# define MRDCKB1_RESET (1 << 19)
# define MRDCKC0_RESET (1 << 20)
# define MRDCKC1_RESET (1 << 21)
# define MRDCKD0_RESET (1 << 22)
# define MRDCKD1_RESET (1 << 23)
# define DLL_READY_READ (1 << 24)
# define USE_DISPLAY_GAP (1 << 25)
# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
# define MPLL_TURNOFF_D2 (1 << 28)
#define DLL_CNTL 0x64c
# define MRDCKA0_BYPASS (1 << 24)
# define MRDCKA1_BYPASS (1 << 25)
# define MRDCKB0_BYPASS (1 << 26)
# define MRDCKB1_BYPASS (1 << 27)
# define MRDCKC0_BYPASS (1 << 28)
# define MRDCKC1_BYPASS (1 << 29)
# define MRDCKD0_BYPASS (1 << 30)
# define MRDCKD1_BYPASS (1 << 31)
 
#define MPLL_TIME 0x654
# define MPLL_LOCK_TIME(x) ((x) << 0)
# define MPLL_LOCK_TIME_MASK (0xffff << 0)
# define MPLL_RESET_TIME(x) ((x) << 16)
# define MPLL_RESET_TIME_MASK (0xffff << 16)
 
#define CG_CLKPIN_CNTL 0x660
# define MUX_TCLK_TO_XCLK (1 << 8)
# define XTALIN_DIVIDE (1 << 9)
 
#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
# define CURRENT_PROFILE_INDEX_SHIFT 4
 
#define S0_VID_LOWER_SMIO_CNTL 0x678
#define S1_VID_LOWER_SMIO_CNTL 0x67c
#define S2_VID_LOWER_SMIO_CNTL 0x680
#define S3_VID_LOWER_SMIO_CNTL 0x684
 
#define CG_FTV 0x690
#define CG_FFCT_0 0x694
# define UTC_0(x) ((x) << 0)
# define UTC_0_MASK (0x3ff << 0)
# define DTC_0(x) ((x) << 10)
# define DTC_0_MASK (0x3ff << 10)
 
#define CG_BSP 0x6d0
# define BSP(x) ((x) << 0)
# define BSP_MASK (0xffff << 0)
# define BSU(x) ((x) << 16)
# define BSU_MASK (0xf << 16)
#define CG_AT 0x6d4
# define CG_R(x) ((x) << 0)
# define CG_R_MASK (0xffff << 0)
# define CG_L(x) ((x) << 16)
# define CG_L_MASK (0xffff << 16)
#define CG_GIT 0x6d8
# define CG_GICST(x) ((x) << 0)
# define CG_GICST_MASK (0xffff << 0)
# define CG_GIPOT(x) ((x) << 16)
# define CG_GIPOT_MASK (0xffff << 16)
 
#define CG_SSP 0x6e8
# define SST(x) ((x) << 0)
# define SST_MASK (0xffff << 0)
# define SSTU(x) ((x) << 16)
# define SSTU_MASK (0xf << 16)
 
#define CG_DISPLAY_GAP_CNTL 0x714
# define DISP1_GAP(x) ((x) << 0)
# define DISP1_GAP_MASK (3 << 0)
# define DISP2_GAP(x) ((x) << 2)
# define DISP2_GAP_MASK (3 << 2)
# define VBI_TIMER_COUNT(x) ((x) << 4)
# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
# define VBI_TIMER_UNIT(x) ((x) << 20)
# define VBI_TIMER_UNIT_MASK (7 << 20)
# define DISP1_GAP_MCHG(x) ((x) << 24)
# define DISP1_GAP_MCHG_MASK (3 << 24)
# define DISP2_GAP_MCHG(x) ((x) << 26)
# define DISP2_GAP_MCHG_MASK (3 << 26)
 
#define CG_SPLL_SPREAD_SPECTRUM 0x790
#define SSEN (1 << 0)
#define CLKS(x) ((x) << 4)
#define CLKS_MASK (0xfff << 4)
#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
#define CLKV(x) ((x) << 0)
#define CLKV_MASK (0x3ffffff << 0)
#define CG_MPLL_SPREAD_SPECTRUM 0x798
#define CG_UPLL_SPREAD_SPECTRUM 0x79c
# define SSEN_MASK 0x00000001
 
#define CG_CGTT_LOCAL_0 0x7d0
#define CG_CGTT_LOCAL_1 0x7d4
 
#define BIOS_SCRATCH_4 0x1734
 
#define MC_SEQ_MISC0 0x2a00
#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5
 
#define MC_ARB_SQM_RATIO 0x2770
#define STATE0(x) ((x) << 0)
#define STATE0_MASK (0xff << 0)
#define STATE1(x) ((x) << 8)
#define STATE1_MASK (0xff << 8)
#define STATE2(x) ((x) << 16)
#define STATE2_MASK (0xff << 16)
#define STATE3(x) ((x) << 24)
#define STATE3_MASK (0xff << 24)
 
#define MC_ARB_RFSH_RATE 0x27b0
#define POWERMODE0(x) ((x) << 0)
#define POWERMODE0_MASK (0xff << 0)
#define POWERMODE1(x) ((x) << 8)
#define POWERMODE1_MASK (0xff << 8)
#define POWERMODE2(x) ((x) << 16)
#define POWERMODE2_MASK (0xff << 16)
#define POWERMODE3(x) ((x) << 24)
#define POWERMODE3_MASK (0xff << 24)
 
#define CGTS_SM_CTRL_REG 0x9150
 
/* Registers */
#define CB_COLOR0_BASE 0x28040
#define CB_COLOR1_BASE 0x28044
404,23 → 157,10
#define GUI_ACTIVE (1<<31)
#define GRBM_STATUS2 0x8014
 
#define CG_THERMAL_CTRL 0x72C
#define DPM_EVENT_SRC(x) ((x) << 0)
#define DPM_EVENT_SRC_MASK (7 << 0)
#define DIG_THERM_DPM(x) ((x) << 14)
#define DIG_THERM_DPM_MASK 0x003FC000
#define DIG_THERM_DPM_SHIFT 14
#define CG_CLKPIN_CNTL 0x660
# define MUX_TCLK_TO_XCLK (1 << 8)
# define XTALIN_DIVIDE (1 << 9)
 
#define CG_THERMAL_INT 0x734
#define DIG_THERM_INTH(x) ((x) << 8)
#define DIG_THERM_INTH_MASK 0x0000FF00
#define DIG_THERM_INTH_SHIFT 8
#define DIG_THERM_INTL(x) ((x) << 16)
#define DIG_THERM_INTL_MASK 0x00FF0000
#define DIG_THERM_INTL_SHIFT 16
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
 
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x3FF0000
859,7 → 599,7
#define AFMT_VBI_PACKET_CONTROL 0x7608
# define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x760c
# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7610
922,22 → 662,7
#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
 
/* PCIE indirect regs */
#define PCIE_P_CNTL 0x40
# define P_PLL_PWRDN_IN_L1L23 (1 << 3)
# define P_PLL_BUF_PDNB (1 << 4)
# define P_PLL_PDNB (1 << 9)
# define P_ALLOW_PRX_FRONTEND_SHUTOFF (1 << 12)
/* PCIE PORT regs */
#define PCIE_LC_CNTL 0xa0
# define LC_L0S_INACTIVITY(x) ((x) << 8)
# define LC_L0S_INACTIVITY_MASK (0xf << 8)
# define LC_L0S_INACTIVITY_SHIFT 8
# define LC_L1_INACTIVITY(x) ((x) << 12)
# define LC_L1_INACTIVITY_MASK (0xf << 12)
# define LC_L1_INACTIVITY_SHIFT 12
# define LC_PMI_TO_L1_DIS (1 << 16)
# define LC_ASPM_TO_L1_DIS (1 << 24)
/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_SHIFT 0
965,9 → 690,6
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
# define LC_CURRENT_DATA_RATE (1 << 11)
# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
978,21 → 700,7
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
 
/*
* PM4
*/
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
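 
The PM4 helpers above pack the packet type into bits 31:30 and the payload dword count into bits 29:16; for type-0 packets the low 16 bits carry the register offset divided by four. Decoding the PACKET0(0x20C8, 0) write emitted in rv515_ring_start earlier:
 
/* PACKET0(0x20C8, 0):
 *   RADEON_PACKET_TYPE0 << 30  = 0x00000000  (type 0 encodes as 0)
 *   (0x20C8 >> 2) & 0xFFFF     = 0x00000832
 *   (0 & 0x3FFF) << 16         = 0x00000000  (count 0 = one payload dword)
 * header = 0x00000832, followed by the single register value
 */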
 
/* UVD */
#define UVD_GPCOM_VCPU_CMD 0xef0c
#define UVD_GPCOM_VCPU_DATA0 0xef10
#define UVD_GPCOM_VCPU_DATA1 0xef14
 
#define UVD_LMI_EXT40_ADDR 0xf498
#define UVD_VCPU_CHIP_ID 0xf4d4
#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
1006,6 → 714,4
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
 
#define UVD_CONTEXT_ID 0xf6f4
 
#endif
/drivers/video/drm/radeon/si.c
22,6 → 22,7
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
31,90 → 32,40
#include "sid.h"
#include "atom.h"
#include "si_blit_shaders.h"
#include "clearstate_si.h"
#include "radeon_ucode.h"
 
#define SI_PFP_UCODE_SIZE 2144
#define SI_PM4_UCODE_SIZE 2144
#define SI_CE_UCODE_SIZE 2144
#define SI_RLC_UCODE_SIZE 2048
#define SI_MC_UCODE_SIZE 7769
#define OLAND_MC_UCODE_SIZE 7863
 
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
 
MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
MODULE_FIRMWARE("radeon/tahiti_me.bin");
MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
 
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
 
MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
MODULE_FIRMWARE("radeon/pitcairn_me.bin");
MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
 
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");
 
MODULE_FIRMWARE("radeon/verde_pfp.bin");
MODULE_FIRMWARE("radeon/verde_me.bin");
MODULE_FIRMWARE("radeon/verde_ce.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/verde_rlc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");
 
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");
 
MODULE_FIRMWARE("radeon/oland_pfp.bin");
MODULE_FIRMWARE("radeon/oland_me.bin");
MODULE_FIRMWARE("radeon/oland_ce.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/oland_rlc.bin");
MODULE_FIRMWARE("radeon/oland_smc.bin");
 
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
 
MODULE_FIRMWARE("radeon/hainan_pfp.bin");
MODULE_FIRMWARE("radeon/hainan_me.bin");
MODULE_FIRMWARE("radeon/hainan_ce.bin");
MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
 
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
123,236 → 74,7
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable);
static void si_init_pg(struct radeon_device *rdev);
static void si_init_cg(struct radeon_device *rdev);
static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);
 
static const u32 verde_rlc_save_restore_register_list[] =
{
(0x8000 << 16) | (0x98f4 >> 2),
0x00000000,
(0x8040 << 16) | (0x98f4 >> 2),
0x00000000,
(0x8000 << 16) | (0xe80 >> 2),
0x00000000,
(0x8040 << 16) | (0xe80 >> 2),
0x00000000,
(0x8000 << 16) | (0x89bc >> 2),
0x00000000,
(0x8040 << 16) | (0x89bc >> 2),
0x00000000,
(0x8000 << 16) | (0x8c1c >> 2),
0x00000000,
(0x8040 << 16) | (0x8c1c >> 2),
0x00000000,
(0x9c00 << 16) | (0x98f0 >> 2),
0x00000000,
(0x9c00 << 16) | (0xe7c >> 2),
0x00000000,
(0x8000 << 16) | (0x9148 >> 2),
0x00000000,
(0x8040 << 16) | (0x9148 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9150 >> 2),
0x00000000,
(0x9c00 << 16) | (0x897c >> 2),
0x00000000,
(0x9c00 << 16) | (0x8d8c >> 2),
0x00000000,
(0x9c00 << 16) | (0xac54 >> 2),
0x00000000,
0x3,
(0x9c00 << 16) | (0x98f8 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9910 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9914 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9918 >> 2),
0x00000000,
(0x9c00 << 16) | (0x991c >> 2),
0x00000000,
(0x9c00 << 16) | (0x9920 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9924 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9928 >> 2),
0x00000000,
(0x9c00 << 16) | (0x992c >> 2),
0x00000000,
(0x9c00 << 16) | (0x9930 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9934 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9938 >> 2),
0x00000000,
(0x9c00 << 16) | (0x993c >> 2),
0x00000000,
(0x9c00 << 16) | (0x9940 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9944 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9948 >> 2),
0x00000000,
(0x9c00 << 16) | (0x994c >> 2),
0x00000000,
(0x9c00 << 16) | (0x9950 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9954 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9958 >> 2),
0x00000000,
(0x9c00 << 16) | (0x995c >> 2),
0x00000000,
(0x9c00 << 16) | (0x9960 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9964 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9968 >> 2),
0x00000000,
(0x9c00 << 16) | (0x996c >> 2),
0x00000000,
(0x9c00 << 16) | (0x9970 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9974 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9978 >> 2),
0x00000000,
(0x9c00 << 16) | (0x997c >> 2),
0x00000000,
(0x9c00 << 16) | (0x9980 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9984 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9988 >> 2),
0x00000000,
(0x9c00 << 16) | (0x998c >> 2),
0x00000000,
(0x9c00 << 16) | (0x8c00 >> 2),
0x00000000,
(0x9c00 << 16) | (0x8c14 >> 2),
0x00000000,
(0x9c00 << 16) | (0x8c04 >> 2),
0x00000000,
(0x9c00 << 16) | (0x8c08 >> 2),
0x00000000,
(0x8000 << 16) | (0x9b7c >> 2),
0x00000000,
(0x8040 << 16) | (0x9b7c >> 2),
0x00000000,
(0x8000 << 16) | (0xe84 >> 2),
0x00000000,
(0x8040 << 16) | (0xe84 >> 2),
0x00000000,
(0x8000 << 16) | (0x89c0 >> 2),
0x00000000,
(0x8040 << 16) | (0x89c0 >> 2),
0x00000000,
(0x8000 << 16) | (0x914c >> 2),
0x00000000,
(0x8040 << 16) | (0x914c >> 2),
0x00000000,
(0x8000 << 16) | (0x8c20 >> 2),
0x00000000,
(0x8040 << 16) | (0x8c20 >> 2),
0x00000000,
(0x8000 << 16) | (0x9354 >> 2),
0x00000000,
(0x8040 << 16) | (0x9354 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9060 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9364 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9100 >> 2),
0x00000000,
(0x9c00 << 16) | (0x913c >> 2),
0x00000000,
(0x8000 << 16) | (0x90e0 >> 2),
0x00000000,
(0x8000 << 16) | (0x90e4 >> 2),
0x00000000,
(0x8000 << 16) | (0x90e8 >> 2),
0x00000000,
(0x8040 << 16) | (0x90e0 >> 2),
0x00000000,
(0x8040 << 16) | (0x90e4 >> 2),
0x00000000,
(0x8040 << 16) | (0x90e8 >> 2),
0x00000000,
(0x9c00 << 16) | (0x8bcc >> 2),
0x00000000,
(0x9c00 << 16) | (0x8b24 >> 2),
0x00000000,
(0x9c00 << 16) | (0x88c4 >> 2),
0x00000000,
(0x9c00 << 16) | (0x8e50 >> 2),
0x00000000,
(0x9c00 << 16) | (0x8c0c >> 2),
0x00000000,
(0x9c00 << 16) | (0x8e58 >> 2),
0x00000000,
(0x9c00 << 16) | (0x8e5c >> 2),
0x00000000,
(0x9c00 << 16) | (0x9508 >> 2),
0x00000000,
(0x9c00 << 16) | (0x950c >> 2),
0x00000000,
(0x9c00 << 16) | (0x9494 >> 2),
0x00000000,
(0x9c00 << 16) | (0xac0c >> 2),
0x00000000,
(0x9c00 << 16) | (0xac10 >> 2),
0x00000000,
(0x9c00 << 16) | (0xac14 >> 2),
0x00000000,
(0x9c00 << 16) | (0xae00 >> 2),
0x00000000,
(0x9c00 << 16) | (0xac08 >> 2),
0x00000000,
(0x9c00 << 16) | (0x88d4 >> 2),
0x00000000,
(0x9c00 << 16) | (0x88c8 >> 2),
0x00000000,
(0x9c00 << 16) | (0x88cc >> 2),
0x00000000,
(0x9c00 << 16) | (0x89b0 >> 2),
0x00000000,
(0x9c00 << 16) | (0x8b10 >> 2),
0x00000000,
(0x9c00 << 16) | (0x8a14 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9830 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9834 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9838 >> 2),
0x00000000,
(0x9c00 << 16) | (0x9a10 >> 2),
0x00000000,
(0x8000 << 16) | (0x9870 >> 2),
0x00000000,
(0x8000 << 16) | (0x9874 >> 2),
0x00000000,
(0x8001 << 16) | (0x9870 >> 2),
0x00000000,
(0x8001 << 16) | (0x9874 >> 2),
0x00000000,
(0x8040 << 16) | (0x9870 >> 2),
0x00000000,
(0x8040 << 16) | (0x9874 >> 2),
0x00000000,
(0x8041 << 16) | (0x9870 >> 2),
0x00000000,
(0x8041 << 16) | (0x9874 >> 2),
0x00000000,
0x00000000
};
 
static const u32 tahiti_golden_rlc_registers[] =
{
0xc424, 0xffffffff, 0x00601005,
1507,57 → 1229,44
};
 
/* ucode loading */
int si_mc_load_microcode(struct radeon_device *rdev)
static int si_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data = NULL;
const __le32 *new_fw_data = NULL;
const __be32 *fw_data;
u32 running, blackout = 0;
u32 *io_mc_regs = NULL;
const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
u32 *io_mc_regs;
int i, ucode_size, regs_size;
 
if (!rdev->mc_fw)
return -EINVAL;
 
if (rdev->new_fw) {
const struct mc_firmware_header_v1_0 *hdr =
(const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
 
radeon_ucode_print_mc_hdr(&hdr->header);
regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
new_io_mc_regs = (const __le32 *)
(rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
new_fw_data = (const __le32 *)
(rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
} else {
ucode_size = rdev->mc_fw->size / 4;
 
switch (rdev->family) {
case CHIP_TAHITI:
io_mc_regs = (u32 *)&tahiti_io_mc_regs;
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_PITCAIRN:
io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_VERDE:
default:
io_mc_regs = (u32 *)&verde_io_mc_regs;
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_OLAND:
io_mc_regs = (u32 *)&oland_io_mc_regs;
ucode_size = OLAND_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_HAINAN:
io_mc_regs = (u32 *)&hainan_io_mc_regs;
ucode_size = OLAND_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
}
fw_data = (const __be32 *)rdev->mc_fw->data;
}
 
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 
1573,21 → 1282,13
 
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
if (rdev->new_fw) {
WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
} else {
WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
}
/* load the MC ucode */
for (i = 0; i < ucode_size; i++) {
if (rdev->new_fw)
WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
else
fw_data = (const __be32 *)rdev->mc_fw->data;
for (i = 0; i < ucode_size; i++)
WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
}
 
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
1615,80 → 1316,75
 
static int si_init_microcode(struct radeon_device *rdev)
{
struct platform_device *pdev;
const char *chip_name;
const char *new_chip_name;
const char *rlc_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
size_t smc_req_size, mc2_req_size;
char fw_name[30];
int err;
int new_fw = 0;
 
DRM_DEBUG("\n");
 
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
err = IS_ERR(pdev);
if (err) {
printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
return -EINVAL;
}
 
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
new_chip_name = "tahiti";
rlc_chip_name = "TAHITI";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
new_chip_name = "pitcairn";
rlc_chip_name = "PITCAIRN";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
break;
case CHIP_VERDE:
chip_name = "VERDE";
new_chip_name = "verde";
rlc_chip_name = "VERDE";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
break;
case CHIP_OLAND:
chip_name = "OLAND";
new_chip_name = "oland";
rlc_chip_name = "OLAND";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
mc_req_size = OLAND_MC_UCODE_SIZE * 4;
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
new_chip_name = "hainan";
rlc_chip_name = "HAINAN";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
mc_req_size = OLAND_MC_UCODE_SIZE * 4;
break;
default: BUG();
}
 
DRM_INFO("Loading %s Microcode\n", new_chip_name);
DRM_INFO("Loading %s Microcode\n", chip_name);
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
1698,23 → 1394,9
err = -EINVAL;
goto out;
}
} else {
err = radeon_ucode_validate(rdev->pfp_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
1723,23 → 1405,9
rdev->me_fw->size, fw_name);
err = -EINVAL;
}
} else {
err = radeon_ucode_validate(rdev->me_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->ce_fw->size != ce_req_size) {
1748,23 → 1416,9
rdev->ce_fw->size, fw_name);
err = -EINVAL;
}
} else {
err = radeon_ucode_validate(rdev->ce_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
1773,88 → 1427,21
rdev->rlc_fw->size, fw_name);
err = -EINVAL;
}
} else {
err = radeon_ucode_validate(rdev->rlc_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
if (err)
goto out;
}
if ((rdev->mc_fw->size != mc_req_size) &&
(rdev->mc_fw->size != mc2_req_size)) {
if (rdev->mc_fw->size != mc_req_size) {
printk(KERN_ERR
"si_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
} else {
err = radeon_ucode_validate(rdev->mc_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
printk(KERN_ERR
"smc: error loading firmware \"%s\"\n",
fw_name);
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"si_smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
err = -EINVAL;
}
} else {
err = radeon_ucode_validate(rdev->smc_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}
out:
platform_device_unregister(pdev);
 
if (new_fw == 0) {
rdev->new_fw = false;
} else if (new_fw < 6) {
printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
err = -EINVAL;
} else {
rdev->new_fw = true;
}
out:
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
1870,8 → 1457,6
rdev->rlc_fw = NULL;
release_firmware(rdev->mc_fw);
rdev->mc_fw = NULL;
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
}
return err;
}
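 
The rewritten si_init_microcode() above tries the new lowercase firmware names first, falls back to the legacy uppercase blobs, and counts validated files in new_fw. The final check is a three-way decision over the six blobs (pfp, me, ce, rlc, mc, smc):
 
/* new_fw == 0 -> every blob is legacy: rdev->new_fw = false
 * new_fw <  6 -> a mix of old and new files: reject with -EINVAL
 * new_fw == 6 -> all blobs carry validated headers: rdev->new_fw = true
 */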
1882,8 → 1467,7
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
u32 tmp, buffer_alloc, i;
u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
u32 tmp;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
1898,30 → 1482,16
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
if (other_mode) {
if (other_mode)
tmp = 0; /* 1/2 */
buffer_alloc = 1;
} else {
else
tmp = 2; /* whole */
buffer_alloc = 2;
}
} else {
} else
tmp = 0;
buffer_alloc = 0;
}
 
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
DC_LB_MEMORY_CONFIG(tmp));
 
WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
DMIF_BUFFERS_ALLOCATED(buffer_alloc));
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
DMIF_BUFFERS_ALLOCATED_COMPLETED)
break;
udelay(1);
}
 
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
2222,8 → 1792,7
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
struct dce6_wm_params wm_low, wm_high;
u32 dram_channels;
struct dce6_wm_params wm;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
2239,88 → 1808,43
priority_a_cnt = 0;
priority_b_cnt = 0;
 
wm.yclk = rdev->pm.current_mclk * 10;
wm.sclk = rdev->pm.current_sclk * 10;
wm.disp_clk = mode->clock;
wm.src_width = mode->crtc_hdisplay;
wm.active_time = mode->crtc_hdisplay * pixel_period;
wm.blank_time = line_time - wm.active_time;
wm.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm.interlaced = true;
wm.vsc = radeon_crtc->vsc;
wm.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
wm.vtaps = 2;
wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm.lb_size = lb_size;
if (rdev->family == CHIP_ARUBA)
dram_channels = evergreen_get_number_of_dram_channels(rdev);
wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
else
dram_channels = si_get_number_of_dram_channels(rdev);
wm.dram_channels = si_get_number_of_dram_channels(rdev);
wm.num_heads = num_heads;
 
/* watermark for high clocks */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
wm_high.yclk =
radeon_dpm_get_mclk(rdev, false) * 10;
wm_high.sclk =
radeon_dpm_get_sclk(rdev, false) * 10;
} else {
wm_high.yclk = rdev->pm.current_mclk * 10;
wm_high.sclk = rdev->pm.current_sclk * 10;
}
 
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
wm_high.active_time = mode->crtc_hdisplay * pixel_period;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_high.interlaced = true;
wm_high.vsc = radeon_crtc->vsc;
wm_high.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
wm_high.vtaps = 2;
wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm_high.lb_size = lb_size;
wm_high.dram_channels = dram_channels;
wm_high.num_heads = num_heads;
 
/* watermark for low clocks */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
wm_low.yclk =
radeon_dpm_get_mclk(rdev, true) * 10;
wm_low.sclk =
radeon_dpm_get_sclk(rdev, true) * 10;
} else {
wm_low.yclk = rdev->pm.current_mclk * 10;
wm_low.sclk = rdev->pm.current_sclk * 10;
}
 
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
wm_low.active_time = mode->crtc_hdisplay * pixel_period;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm_low.interlaced = true;
wm_low.vsc = radeon_crtc->vsc;
wm_low.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
wm_low.vtaps = 2;
wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm_low.lb_size = lb_size;
wm_low.dram_channels = dram_channels;
wm_low.num_heads = num_heads;
 
/* set for high clocks */
latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
/* set for low clocks */
latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);
/* wm.yclk = low clk; wm.sclk = low clk */
latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);
 
/* possibly force display priority to high */
/* should really do this at mode validation time... */
if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
!dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
!dce6_check_latency_hiding(&wm_high) ||
if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
!dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
!dce6_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
!dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
!dce6_check_latency_hiding(&wm_low) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
 
a.full = dfixed_const(1000);
b.full = dfixed_const(mode->clock);
2371,10 → 1895,6
WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
 
/* save values for DPM */
radeon_crtc->line_time = line_time;
radeon_crtc->wm_high = latency_watermark_a;
radeon_crtc->wm_low = latency_watermark_b;
}
 
void dce6_bandwidth_update(struct radeon_device *rdev)
2981,7 → 2501,7
}
 
static u32 si_get_rb_disabled(struct radeon_device *rdev,
u32 max_rb_num_per_se,
u32 max_rb_num, u32 se_num,
u32 sh_per_se)
{
u32 data, mask;
2995,7 → 2515,7
 
data >>= BACKEND_DISABLE_SHIFT;
 
mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
 
return data & mask;
}
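 
The hunk above changes si_get_rb_disabled()'s contract: callers now pass the per-shader-engine RB count directly instead of the chip-wide total, so the helper no longer divides by se_num. Both formulas produce the same mask; the numbers below assume a Tahiti-class layout (4 RBs per SE, 2 SEs, 2 SHs per SE) purely for illustration:
 
/* new: si_create_bitmask(max_rb_num_per_se / sh_per_se)   = bitmask(4 / 2)
 * old: si_create_bitmask(max_rb_num / se_num / sh_per_se) = bitmask(8 / 2 / 2)
 * both yield 0x3: two render backends visible per shader array
 */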
3002,7 → 2522,7
 
static void si_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
u32 max_rb_num_per_se)
u32 max_rb_num)
{
int i, j;
u32 data, mask;
3012,7 → 2532,7
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
si_select_se_sh(rdev, i, j);
data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
}
}
3019,14 → 2539,12
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
mask = 1;
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
for (i = 0; i < max_rb_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
 
rdev->config.si.backend_enable_mask = enabled_rbs;
 
for (i = 0; i < se_num; i++) {
si_select_se_sh(rdev, i, 0xffffffff);
data = 0;
3255,13 → 2773,6
rdev->config.si.max_sh_per_se,
rdev->config.si.max_cu_per_sh);
 
rdev->config.si.active_cus = 0;
for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
rdev->config.si.active_cus +=
hweight32(si_get_cu_active_bitmap(rdev, i, j));
}
}
 
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
3350,7 → 2861,7
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
radeon_ring_write(ring, lower_32_bits(addr));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
3383,7 → 2894,7
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (1 << 8));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
radeon_ring_write(ring, next_rptr);
}
 
3424,7 → 2935,6
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
WREG32(SCRATCH_UMSK, 0);
3437,57 → 2947,15
 
static int si_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
int i;
 
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
if (!rdev->me_fw || !rdev->pfp_fw)
return -EINVAL;
 
si_cp_enable(rdev, false);
 
if (rdev->new_fw) {
const struct gfx_firmware_header_v1_0 *pfp_hdr =
(const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
const struct gfx_firmware_header_v1_0 *ce_hdr =
(const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
const struct gfx_firmware_header_v1_0 *me_hdr =
(const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
const __le32 *fw_data;
u32 fw_size;
 
radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
radeon_ucode_print_gfx_hdr(&ce_hdr->header);
radeon_ucode_print_gfx_hdr(&me_hdr->header);
 
/* PFP */
fw_data = (const __le32 *)
(rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
 
/* CE */
fw_data = (const __le32 *)
(rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
WREG32(CP_CE_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(CP_CE_UCODE_ADDR, 0);
 
/* ME */
fw_data = (const __be32 *)
(rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
WREG32(CP_ME_RAM_WADDR, 0);
} else {
const __be32 *fw_data;
 
/* PFP */
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
3507,7 → 2975,6
for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
WREG32(CP_ME_RAM_WADDR, 0);
}
 
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
3540,7 → 3007,7
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
radeon_ring_write(ring, 0xc000);
radeon_ring_write(ring, 0xe000);
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
 
si_cp_enable(rdev, true);
 
3569,7 → 3036,7
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
 
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i];
3579,7 → 3046,7
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_ring_unlock_commit(rdev, ring);
}
 
return 0;
3590,17 → 3057,17
struct radeon_ring *ring;
si_cp_enable(rdev, false);
 
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);
// ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
// radeon_ring_fini(rdev, ring);
// radeon_scratch_free(rdev, ring->rptr_save_reg);
 
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);
// ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
// radeon_ring_fini(rdev, ring);
// radeon_scratch_free(rdev, ring->rptr_save_reg);
 
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);
// ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
// radeon_ring_fini(rdev, ring);
// radeon_scratch_free(rdev, ring->rptr_save_reg);
}
 
static int si_cp_resume(struct radeon_device *rdev)
3610,7 → 3077,16
u32 rb_bufsz;
int r;
 
si_enable_gui_idle_interrupt(rdev, false);
/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
SOFT_RESET_PA |
SOFT_RESET_VGT |
SOFT_RESET_SPI |
SOFT_RESET_SX));
RREG32(GRBM_SOFT_RESET);
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
 
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3624,8 → 3100,8
/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
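
/* order_base_2() (drm_order() in the older revision) is ceil(log2(n)).
 * Illustrative example: a 1 MiB gfx ring gives
 * rb_bufsz = order_base_2(0x100000 / 8) = 17, and RADEON_GPU_PAGE_SIZE =
 * 4096 gives order_base_2(4096 / 8) = 9, so tmp = (9 << 8) | 17 before the
 * optional BUF_SWAP_32BIT is or'ed in. */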
3652,11 → 3128,13
 
WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
 
ring->rptr = RREG32(CP_RB0_RPTR);
 
/* ring1 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
3676,11 → 3154,13
 
WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
 
ring->rptr = RREG32(CP_RB1_RPTR);
 
/* ring2 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
3700,6 → 3180,8
 
WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
 
ring->rptr = RREG32(CP_RB2_RPTR);
 
/* start the rings */
si_cp_start(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
3721,15 → 3203,10
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
 
si_enable_gui_idle_interrupt(rdev, true);
 
if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
return 0;
}
 
u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
3827,13 → 3304,6
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
 
/* disable PG/CG */
si_fini_pg(rdev);
si_fini_cg(rdev);
 
/* stop the rlc */
si_rlc_stop(rdev);
 
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
 
3942,106 → 3412,6
evergreen_print_gpu_status_regs(rdev);
}
 
static void si_set_clk_bypass_mode(struct radeon_device *rdev)
{
u32 tmp, i;
 
tmp = RREG32(CG_SPLL_FUNC_CNTL);
tmp |= SPLL_BYPASS_EN;
WREG32(CG_SPLL_FUNC_CNTL, tmp);
 
tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
tmp |= SPLL_CTLREQ_CHG;
WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
 
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
break;
udelay(1);
}
 
tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
 
tmp = RREG32(MPLL_CNTL_MODE);
tmp &= ~MPLL_MCLK_SEL;
WREG32(MPLL_CNTL_MODE, tmp);
}
 
static void si_spll_powerdown(struct radeon_device *rdev)
{
u32 tmp;
 
tmp = RREG32(SPLL_CNTL_MODE);
tmp |= SPLL_SW_DIR_CONTROL;
WREG32(SPLL_CNTL_MODE, tmp);
 
tmp = RREG32(CG_SPLL_FUNC_CNTL);
tmp |= SPLL_RESET;
WREG32(CG_SPLL_FUNC_CNTL, tmp);
 
tmp = RREG32(CG_SPLL_FUNC_CNTL);
tmp |= SPLL_SLEEP;
WREG32(CG_SPLL_FUNC_CNTL, tmp);
 
tmp = RREG32(SPLL_CNTL_MODE);
tmp &= ~SPLL_SW_DIR_CONTROL;
WREG32(SPLL_CNTL_MODE, tmp);
}
 
static void si_gpu_pci_config_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 tmp, i;
 
dev_info(rdev->dev, "GPU pci config reset\n");
 
/* disable dpm? */
 
/* disable cg/pg */
si_fini_pg(rdev);
si_fini_cg(rdev);
 
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
/* dma0 */
tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
tmp &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
/* dma1 */
tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
tmp &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
/* XXX other engines? */
 
/* halt the rlc, disable cp internal ints */
si_rlc_stop(rdev);
 
udelay(50);
 
/* disable mem access */
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
}
 
/* set mclk/sclk to bypass */
si_set_clk_bypass_mode(rdev);
/* powerdown spll */
si_spll_powerdown(rdev);
/* disable BM */
pci_clear_master(rdev->pdev);
/* reset */
radeon_pci_config_reset(rdev);
/* wait for asic to come out of reset */
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
break;
udelay(1);
}
}
 
int si_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;
4051,17 → 3421,10
if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);
 
/* try soft reset */
si_gpu_soft_reset(rdev, reset_mask);
 
reset_mask = si_gpu_check_soft_reset(rdev);
 
/* try pci config reset */
if (reset_mask && radeon_hard_reset)
si_gpu_pci_config_reset(rdev);
 
reset_mask = si_gpu_check_soft_reset(rdev);
 
if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);
 
4084,12 → 3447,42
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
radeon_ring_lockup_update(rdev, ring);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
 
/**
* si_dma_is_lockup - Check if the DMA engine is locked up
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 reset_mask = si_gpu_check_soft_reset(rdev);
u32 mask;
 
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
mask = RADEON_RESET_DMA;
else
mask = RADEON_RESET_DMA1;
 
if (!(reset_mask & mask)) {
radeon_ring_lockup_update(ring);
return false;
}
/* force ring activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
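
/* A minimal caller-side sketch of the lockup contract above (real users go
 * through the asic function table; this standalone helper is hypothetical
 * and only illustrates the semantics):
 */
static bool example_gfx_hung(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	/* true only if the GFX/compute/CP reset bits are raised and the
	 * ring has made no forward progress since the previous poll */
	return si_gfx_is_lockup(rdev, ring);
}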
 
/* MC */
static void si_mc_program(struct radeon_device *rdev)
{
4142,7 → 3535,7
}
}
 
void si_vram_gtt_location(struct radeon_device *rdev,
static void si_vram_gtt_location(struct radeon_device *rdev,
struct radeon_mc *mc)
{
if (mc->mc_vram_size > 0xFFC0000000ULL) {
4207,15 → 3600,8
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
tmp = RREG32(CONFIG_MEMSIZE);
/* some boards may have garbage in the upper 16 bits */
if (tmp & 0xffff0000) {
DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
if (tmp & 0xffff)
tmp &= 0xffff;
}
rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
4246,17 → 3632,16
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
ENABLE_L1_TLB |
ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
4263,8 → 3648,7
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
BANK_SELECT(4) |
L2_CACHE_BIGK_FRAGMENT_SIZE(4));
L2_CACHE_BIGK_FRAGMENT_SIZE(0));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
4290,10 → 3674,10
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
rdev->vm_manager.saved_table_addr[i]);
rdev->gart.table_addr >> 12);
else
WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
rdev->vm_manager.saved_table_addr[i]);
rdev->gart.table_addr >> 12);
}
 
/* enable context1-15 */
4301,7 → 3685,6
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4325,17 → 3708,6
 
static void si_pcie_gart_disable(struct radeon_device *rdev)
{
unsigned i;
 
for (i = 1; i < 16; ++i) {
uint32_t reg;
if (i < 8)
reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
else
reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
}
 
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
4424,64 → 3796,13
return 0;
}
 
static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
{
u32 start_reg, reg, i;
u32 command = ib[idx + 4];
u32 info = ib[idx + 1];
u32 idx_value = ib[idx];
if (command & PACKET3_CP_DMA_CMD_SAS) {
/* src address space is register */
if (((info & 0x60000000) >> 29) == 0) {
start_reg = idx_value << 2;
if (command & PACKET3_CP_DMA_CMD_SAIC) {
reg = start_reg;
if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad SRC register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad SRC register\n");
return -EINVAL;
}
}
}
}
}
if (command & PACKET3_CP_DMA_CMD_DAS) {
/* dst address space is register */
if (((info & 0x00300000) >> 20) == 0) {
start_reg = ib[idx + 2];
if (command & PACKET3_CP_DMA_CMD_DAIC) {
reg = start_reg;
if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad DST register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad DST register\n");
return -EINVAL;
}
}
}
}
}
return 0;
}
 
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
int r;
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
u32 command, info;
 
switch (pkt->opcode) {
case PACKET3_NOP:
4582,9 → 3903,50
}
break;
case PACKET3_CP_DMA:
r = si_vm_packet3_cp_dma_check(ib, idx);
if (r)
return r;
command = ib[idx + 4];
info = ib[idx + 1];
if (command & PACKET3_CP_DMA_CMD_SAS) {
/* src address space is register */
if (((info & 0x60000000) >> 29) == 0) {
start_reg = idx_value << 2;
if (command & PACKET3_CP_DMA_CMD_SAIC) {
reg = start_reg;
if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad SRC register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad SRC register\n");
return -EINVAL;
}
}
}
}
}
if (command & PACKET3_CP_DMA_CMD_DAS) {
/* dst address space is register */
if (((info & 0x00300000) >> 20) == 0) {
start_reg = ib[idx + 2];
if (command & PACKET3_CP_DMA_CMD_DAIC) {
reg = start_reg;
if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad DST register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!si_vm_reg_valid(reg)) {
DRM_ERROR("CP DMA Bad DST register\n");
return -EINVAL;
}
}
}
}
}
break;
default:
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
4596,7 → 3958,6
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
int r;
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, reg, i;
4669,11 → 4030,6
return -EINVAL;
}
break;
case PACKET3_CP_DMA:
r = si_vm_packet3_cp_dma_check(ib, idx);
if (r)
return r;
break;
default:
DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
4751,268 → 4107,110
}
 
/**
 * si_vm_decode_fault - print human readable fault info
 *
 * @rdev: radeon_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (SI).
 */
static void si_vm_decode_fault(struct radeon_device *rdev,
u32 status, u32 addr)
/**
 * si_vm_set_page - update the page tables using the CP
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the CP (SI).
 */
void si_vm_set_page(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
char *block;
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
uint64_t value;
unsigned ndw;
 
if (rdev->family == CHIP_TAHITI) {
switch (mc_id) {
case 160:
case 144:
case 96:
case 80:
case 224:
case 208:
case 32:
case 16:
block = "CB";
break;
case 161:
case 145:
case 97:
case 81:
case 225:
case 209:
case 33:
case 17:
block = "CB_FMASK";
break;
case 162:
case 146:
case 98:
case 82:
case 226:
case 210:
case 34:
case 18:
block = "CB_CMASK";
break;
case 163:
case 147:
case 99:
case 83:
case 227:
case 211:
case 35:
case 19:
block = "CB_IMMED";
break;
case 164:
case 148:
case 100:
case 84:
case 228:
case 212:
case 36:
case 20:
block = "DB";
break;
case 165:
case 149:
case 101:
case 85:
case 229:
case 213:
case 37:
case 21:
block = "DB_HTILE";
break;
case 167:
case 151:
case 103:
case 87:
case 231:
case 215:
case 39:
case 23:
block = "DB_STEN";
break;
case 72:
case 68:
case 64:
case 8:
case 4:
case 0:
case 136:
case 132:
case 128:
case 200:
case 196:
case 192:
block = "TC";
break;
case 112:
case 48:
block = "CP";
break;
case 49:
case 177:
case 50:
case 178:
block = "SH";
break;
case 53:
case 190:
block = "VGT";
break;
case 117:
block = "IH";
break;
case 51:
case 115:
block = "RLC";
break;
case 119:
case 183:
block = "DMA0";
break;
case 61:
block = "DMA1";
break;
case 248:
case 120:
block = "HDP";
break;
default:
block = "unknown";
break;
if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
while (count) {
ndw = 2 + count * 2;
if (ndw > 0x3FFE)
ndw = 0x3FFE;
 
ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(1));
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe);
for (; ndw > 2; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & RADEON_VM_PAGE_VALID) {
value = addr;
} else {
value = 0;
}
addr += incr;
value |= r600_flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
switch (mc_id) {
case 32:
case 16:
case 96:
case 80:
case 160:
case 144:
case 224:
case 208:
block = "CB";
break;
case 33:
case 17:
case 97:
case 81:
case 161:
case 145:
case 225:
case 209:
block = "CB_FMASK";
break;
case 34:
case 18:
case 98:
case 82:
case 162:
case 146:
case 226:
case 210:
block = "CB_CMASK";
break;
case 35:
case 19:
case 99:
case 83:
case 163:
case 147:
case 227:
case 211:
block = "CB_IMMED";
break;
case 36:
case 20:
case 100:
case 84:
case 164:
case 148:
case 228:
case 212:
block = "DB";
break;
case 37:
case 21:
case 101:
case 85:
case 165:
case 149:
case 229:
case 213:
block = "DB_HTILE";
break;
case 39:
case 23:
case 103:
case 87:
case 167:
case 151:
case 231:
case 215:
block = "DB_STEN";
break;
case 72:
case 68:
case 8:
case 4:
case 136:
case 132:
case 200:
case 196:
block = "TC";
break;
case 112:
case 48:
block = "CP";
break;
case 49:
case 177:
case 50:
case 178:
block = "SH";
break;
case 53:
block = "VGT";
break;
case 117:
block = "IH";
break;
case 51:
case 115:
block = "RLC";
break;
case 119:
case 183:
block = "DMA0";
break;
case 61:
block = "DMA1";
break;
case 248:
case 120:
block = "HDP";
break;
default:
block = "unknown";
break;
/* DMA */
if (flags & RADEON_VM_PAGE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;
 
/* for non-physically contiguous pages (system) */
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & RADEON_VM_PAGE_VALID) {
value = addr;
} else {
value = 0;
}
addr += incr;
value |= r600_flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;
 
printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
protections, vmid, addr,
(status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
block, mc_id);
if (flags & RADEON_VM_PAGE_VALID)
value = addr;
else
value = 0;
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
ib->ptr[ib->length_dw++] = r600_flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
pe += ndw * 4;
addr += (ndw / 2) * incr;
count -= ndw / 2;
}
}
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}
}
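
/* Layout sketch of one CP WRITE_DATA burst from the GFX branch above
 * (illustrative, for count = 2 page entries; the PACKET3 count field is one
 * less than the number of dwords that follow it):
 *
 *   PACKET3(PACKET3_WRITE_DATA, 6)                    ndw = 2 + 2 * 2
 *   WRITE_DATA_ENGINE_SEL(0) | WRITE_DATA_DST_SEL(1)
 *   pe, low 32 bits
 *   upper_32_bits(pe)
 *   pte0 low dword, pte0 high dword
 *   pte1 low dword, pte1 high dword
 */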
 
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
5023,7 → 4221,7
 
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
 
if (vm->id < 8) {
5038,7 → 4236,7
 
/* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
radeon_ring_write(ring, 0);
5046,7 → 4244,7
 
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
5057,749 → 4255,135
radeon_ring_write(ring, 0x0);
}
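
/* Example (illustrative): flushing vm->id = 10 falls in the upper register
 * bank, so the page table base lands in
 * VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((10 - 8) << 2), and the final
 * VM_INVALIDATE_REQUEST write of (1 << 10) invalidates only that context's
 * TLB entries. */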
 
/*
* Power and clock gating
*/
static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
int i;
struct radeon_ring *ring = &rdev->ring[ridx];
 
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
break;
udelay(1);
}
if (vm == NULL)
return;
 
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
break;
udelay(1);
}
}
 
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable)
{
u32 tmp = RREG32(CP_INT_CNTL_RING0);
u32 mask;
int i;
 
if (enable)
tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
else
tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING0, tmp);
 
if (!enable) {
/* read a gfx register */
tmp = RREG32(DB_DEPTH_INFO);
 
mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
for (i = 0; i < rdev->usec_timeout; i++) {
if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
break;
udelay(1);
}
}
}
 
static void si_set_uvd_dcm(struct radeon_device *rdev,
bool sw_mode)
{
u32 tmp, tmp2;
 
tmp = RREG32(UVD_CGC_CTRL);
tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
tmp |= DCM | CG_DT(1) | CLK_OD(4);
 
if (sw_mode) {
tmp &= ~0x7ffff800;
tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
if (vm->id < 8) {
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
} else {
tmp |= 0x7ffff800;
tmp2 = 0;
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
}
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
WREG32(UVD_CGC_CTRL, tmp);
WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
}
/* flush hdp cache */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
radeon_ring_write(ring, 1);
 
void si_init_uvd_internal_cg(struct radeon_device *rdev)
{
bool hw_mode = true;
 
if (hw_mode) {
si_set_uvd_dcm(rdev, false);
} else {
u32 tmp = RREG32(UVD_CGC_CTRL);
tmp &= ~DCM;
WREG32(UVD_CGC_CTRL, tmp);
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm->id);
}
}
 
static u32 si_halt_rlc(struct radeon_device *rdev)
/*
* RLC
*/
void si_rlc_fini(struct radeon_device *rdev)
{
u32 data, orig;
int r;
 
orig = data = RREG32(RLC_CNTL);
/* save restore block */
if (rdev->rlc.save_restore_obj) {
r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
if (unlikely(r != 0))
dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
radeon_bo_unpin(rdev->rlc.save_restore_obj);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
 
if (data & RLC_ENABLE) {
data &= ~RLC_ENABLE;
WREG32(RLC_CNTL, data);
 
si_wait_for_rlc_serdes(rdev);
radeon_bo_unref(&rdev->rlc.save_restore_obj);
rdev->rlc.save_restore_obj = NULL;
}
 
return orig;
}
/* clear state block */
if (rdev->rlc.clear_state_obj) {
r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
if (unlikely(r != 0))
dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
radeon_bo_unpin(rdev->rlc.clear_state_obj);
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
 
static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
{
u32 tmp;
 
tmp = RREG32(RLC_CNTL);
if (tmp != rlc)
WREG32(RLC_CNTL, rlc);
radeon_bo_unref(&rdev->rlc.clear_state_obj);
rdev->rlc.clear_state_obj = NULL;
}
 
static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
{
u32 data, orig;
 
orig = data = RREG32(DMA_PG);
if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
data |= PG_CNTL_ENABLE;
else
data &= ~PG_CNTL_ENABLE;
if (orig != data)
WREG32(DMA_PG, data);
}
 
static void si_init_dma_pg(struct radeon_device *rdev)
int si_rlc_init(struct radeon_device *rdev)
{
u32 tmp;
int r;
 
WREG32(DMA_PGFSM_WRITE, 0x00002000);
WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
 
for (tmp = 0; tmp < 5; tmp++)
WREG32(DMA_PGFSM_WRITE, 0);
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL,
&rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
}
 
static void si_enable_gfx_cgpg(struct radeon_device *rdev,
bool enable)
{
u32 tmp;
 
if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
WREG32(RLC_TTOP_D, tmp);
 
tmp = RREG32(RLC_PG_CNTL);
tmp |= GFX_PG_ENABLE;
WREG32(RLC_PG_CNTL, tmp);
 
tmp = RREG32(RLC_AUTO_PG_CTRL);
tmp |= AUTO_PG_EN;
WREG32(RLC_AUTO_PG_CTRL, tmp);
} else {
tmp = RREG32(RLC_AUTO_PG_CTRL);
tmp &= ~AUTO_PG_EN;
WREG32(RLC_AUTO_PG_CTRL, tmp);
 
tmp = RREG32(DB_RENDER_CONTROL);
}
}
 
static void si_init_gfx_cgpg(struct radeon_device *rdev)
{
u32 tmp;
 
WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
 
tmp = RREG32(RLC_PG_CNTL);
tmp |= GFX_PG_SRC;
WREG32(RLC_PG_CNTL, tmp);
 
WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
 
tmp = RREG32(RLC_AUTO_PG_CTRL);
 
tmp &= ~GRBM_REG_SGIT_MASK;
tmp |= GRBM_REG_SGIT(0x700);
tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
WREG32(RLC_AUTO_PG_CTRL, tmp);
r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
if (unlikely(r != 0)) {
si_rlc_fini(rdev);
return r;
}
 
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
{
u32 mask = 0, tmp, tmp1;
int i;
 
si_select_se_sh(rdev, se, sh);
tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
tmp &= 0xffff0000;
 
tmp |= tmp1;
tmp >>= 16;
 
for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
mask <<= 1;
mask |= 1;
r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->rlc.save_restore_gpu_addr);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
si_rlc_fini(rdev);
return r;
}
 
return (~tmp) & mask;
/* clear state block */
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL,
&rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
si_rlc_fini(rdev);
return r;
}
 
static void si_init_ao_cu_mask(struct radeon_device *rdev)
{
u32 i, j, k, active_cu_number = 0;
u32 mask, counter, cu_bitmap;
u32 tmp = 0;
 
for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
mask = 1;
cu_bitmap = 0;
counter = 0;
for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
if (counter < 2)
cu_bitmap |= mask;
counter++;
}
mask <<= 1;
r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
if (unlikely(r != 0)) {
si_rlc_fini(rdev);
return r;
}
 
active_cu_number += counter;
tmp |= (cu_bitmap << (i * 16 + j * 8));
r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->rlc.clear_state_gpu_addr);
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
si_rlc_fini(rdev);
return r;
}
}
 
WREG32(RLC_PG_AO_CU_MASK, tmp);
 
tmp = RREG32(RLC_MAX_PG_CU);
tmp &= ~MAX_PU_CU_MASK;
tmp |= MAX_PU_CU(active_cu_number);
WREG32(RLC_MAX_PG_CU, tmp);
}
 
static void si_enable_cgcg(struct radeon_device *rdev,
bool enable)
{
u32 data, orig, tmp;
 
orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
 
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
si_enable_gui_idle_interrupt(rdev, true);
 
WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
 
tmp = si_halt_rlc(rdev);
 
WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
 
si_wait_for_rlc_serdes(rdev);
 
si_update_rlc(rdev, tmp);
 
WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
 
data |= CGCG_EN | CGLS_EN;
} else {
si_enable_gui_idle_interrupt(rdev, false);
 
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
 
data &= ~(CGCG_EN | CGLS_EN);
}
 
if (orig != data)
WREG32(RLC_CGCG_CGLS_CTRL, data);
}
 
static void si_enable_mgcg(struct radeon_device *rdev,
bool enable)
{
u32 data, orig, tmp = 0;
 
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
orig = data = RREG32(CGTS_SM_CTRL_REG);
data = 0x96940200;
if (orig != data)
WREG32(CGTS_SM_CTRL_REG, data);
 
if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
orig = data = RREG32(CP_MEM_SLP_CNTL);
data |= CP_MEM_LS_EN;
if (orig != data)
WREG32(CP_MEM_SLP_CNTL, data);
}
 
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
data &= 0xffffffc0;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
 
tmp = si_halt_rlc(rdev);
 
WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
 
si_update_rlc(rdev, tmp);
} else {
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
data |= 0x00000003;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
 
data = RREG32(CP_MEM_SLP_CNTL);
if (data & CP_MEM_LS_EN) {
data &= ~CP_MEM_LS_EN;
WREG32(CP_MEM_SLP_CNTL, data);
}
orig = data = RREG32(CGTS_SM_CTRL_REG);
data |= LS_OVERRIDE | OVERRIDE;
if (orig != data)
WREG32(CGTS_SM_CTRL_REG, data);
 
tmp = si_halt_rlc(rdev);
 
WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
 
si_update_rlc(rdev, tmp);
}
}
 
static void si_enable_uvd_mgcg(struct radeon_device *rdev,
bool enable)
{
u32 orig, data, tmp;
 
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
tmp |= 0x3fff;
WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
 
orig = data = RREG32(UVD_CGC_CTRL);
data |= DCM;
if (orig != data)
WREG32(UVD_CGC_CTRL, data);
 
WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
} else {
tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
tmp &= ~0x3fff;
WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
 
orig = data = RREG32(UVD_CGC_CTRL);
data &= ~DCM;
if (orig != data)
WREG32(UVD_CGC_CTRL, data);
 
WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
}
}
 
static const u32 mc_cg_registers[] =
{
MC_HUB_MISC_HUB_CG,
MC_HUB_MISC_SIP_CG,
MC_HUB_MISC_VM_CG,
MC_XPB_CLK_GAT,
ATC_MISC_CG,
MC_CITF_MISC_WR_CG,
MC_CITF_MISC_RD_CG,
MC_CITF_MISC_VM_CG,
VM_L2_CG,
};
 
static void si_enable_mc_ls(struct radeon_device *rdev,
bool enable)
{
int i;
u32 orig, data;
 
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
data |= MC_LS_ENABLE;
else
data &= ~MC_LS_ENABLE;
if (data != orig)
WREG32(mc_cg_registers[i], data);
}
}
 
static void si_enable_mc_mgcg(struct radeon_device *rdev,
bool enable)
{
int i;
u32 orig, data;
 
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
data |= MC_CG_ENABLE;
else
data &= ~MC_CG_ENABLE;
if (data != orig)
WREG32(mc_cg_registers[i], data);
}
}
 
static void si_enable_dma_mgcg(struct radeon_device *rdev,
bool enable)
{
u32 orig, data, offset;
int i;
 
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
for (i = 0; i < 2; i++) {
if (i == 0)
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
orig = data = RREG32(DMA_POWER_CNTL + offset);
data &= ~MEM_POWER_OVERRIDE;
if (data != orig)
WREG32(DMA_POWER_CNTL + offset, data);
WREG32(DMA_CLK_CTRL + offset, 0x00000100);
}
} else {
for (i = 0; i < 2; i++) {
if (i == 0)
offset = DMA0_REGISTER_OFFSET;
else
offset = DMA1_REGISTER_OFFSET;
orig = data = RREG32(DMA_POWER_CNTL + offset);
data |= MEM_POWER_OVERRIDE;
if (data != orig)
WREG32(DMA_POWER_CNTL + offset, data);
 
orig = data = RREG32(DMA_CLK_CTRL + offset);
data = 0xff000000;
if (data != orig)
WREG32(DMA_CLK_CTRL + offset, data);
}
}
}
 
static void si_enable_bif_mgls(struct radeon_device *rdev,
bool enable)
{
u32 orig, data;
 
orig = data = RREG32_PCIE(PCIE_CNTL2);
 
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
else
data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
 
if (orig != data)
WREG32_PCIE(PCIE_CNTL2, data);
}
 
static void si_enable_hdp_mgcg(struct radeon_device *rdev,
bool enable)
{
u32 orig, data;
 
orig = data = RREG32(HDP_HOST_PATH_CNTL);
 
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
data &= ~CLOCK_GATING_DIS;
else
data |= CLOCK_GATING_DIS;
 
if (orig != data)
WREG32(HDP_HOST_PATH_CNTL, data);
}
 
static void si_enable_hdp_ls(struct radeon_device *rdev,
bool enable)
{
u32 orig, data;
 
orig = data = RREG32(HDP_MEM_POWER_LS);
 
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
data |= HDP_LS_ENABLE;
else
data &= ~HDP_LS_ENABLE;
 
if (orig != data)
WREG32(HDP_MEM_POWER_LS, data);
}
 
static void si_update_cg(struct radeon_device *rdev,
u32 block, bool enable)
{
if (block & RADEON_CG_BLOCK_GFX) {
si_enable_gui_idle_interrupt(rdev, false);
/* order matters! */
if (enable) {
si_enable_mgcg(rdev, true);
si_enable_cgcg(rdev, true);
} else {
si_enable_cgcg(rdev, false);
si_enable_mgcg(rdev, false);
}
si_enable_gui_idle_interrupt(rdev, true);
}
 
if (block & RADEON_CG_BLOCK_MC) {
si_enable_mc_mgcg(rdev, enable);
si_enable_mc_ls(rdev, enable);
}
 
if (block & RADEON_CG_BLOCK_SDMA) {
si_enable_dma_mgcg(rdev, enable);
}
 
if (block & RADEON_CG_BLOCK_BIF) {
si_enable_bif_mgls(rdev, enable);
}
 
if (block & RADEON_CG_BLOCK_UVD) {
if (rdev->has_uvd) {
si_enable_uvd_mgcg(rdev, enable);
}
}
 
if (block & RADEON_CG_BLOCK_HDP) {
si_enable_hdp_mgcg(rdev, enable);
si_enable_hdp_ls(rdev, enable);
}
}
 
static void si_init_cg(struct radeon_device *rdev)
{
si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
RADEON_CG_BLOCK_MC |
RADEON_CG_BLOCK_SDMA |
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), true);
if (rdev->has_uvd) {
si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
si_init_uvd_internal_cg(rdev);
}
}
 
static void si_fini_cg(struct radeon_device *rdev)
{
if (rdev->has_uvd) {
si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
}
si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
RADEON_CG_BLOCK_MC |
RADEON_CG_BLOCK_SDMA |
RADEON_CG_BLOCK_BIF |
RADEON_CG_BLOCK_HDP), false);
}
 
u32 si_get_csb_size(struct radeon_device *rdev)
{
u32 count = 0;
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
 
if (rdev->rlc.cs_data == NULL)
return 0;
 
/* begin clear state */
count += 2;
/* context control state */
count += 3;
 
for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT)
count += 2 + ext->reg_count;
else
return 0;
}
}
/* pa_sc_raster_config */
count += 3;
/* end clear state */
count += 2;
/* clear state */
count += 2;
 
return count;
}
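
/* Worked example (illustrative cs_data, not the real SI tables): a single
 * SECT_CONTEXT section with extents of 3 and 5 registers gives
 *
 *   2 + 3 + (2 + 3) + (2 + 5) + 3 + 2 + 2 = 24 dwords,
 *
 * matching the packet stream emitted by si_get_csb_buffer() below. */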
 
void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
{
u32 count = 0, i;
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
 
if (rdev->rlc.cs_data == NULL)
return;
if (buffer == NULL)
return;
 
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
buffer[count++] = cpu_to_le32(0x80000000);
buffer[count++] = cpu_to_le32(0x80000000);
 
for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
for (ext = sect->section; ext->extent != NULL; ++ext) {
if (sect->id == SECT_CONTEXT) {
buffer[count++] =
cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
for (i = 0; i < ext->reg_count; i++)
buffer[count++] = cpu_to_le32(ext->extent[i]);
} else {
return;
}
}
}
 
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (rdev->family) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
buffer[count++] = cpu_to_le32(0x2a00126a);
break;
case CHIP_VERDE:
buffer[count++] = cpu_to_le32(0x0000124a);
break;
case CHIP_OLAND:
buffer[count++] = cpu_to_le32(0x00000082);
break;
case CHIP_HAINAN:
buffer[count++] = cpu_to_le32(0x00000000);
break;
default:
buffer[count++] = cpu_to_le32(0x00000000);
break;
}
 
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
 
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
buffer[count++] = cpu_to_le32(0);
}
 
static void si_init_pg(struct radeon_device *rdev)
{
if (rdev->pg_flags) {
if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
si_init_dma_pg(rdev);
}
si_init_ao_cu_mask(rdev);
if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
si_init_gfx_cgpg(rdev);
} else {
WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
}
si_enable_dma_pg(rdev, true);
si_enable_gfx_cgpg(rdev, true);
} else {
WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
}
}
 
static void si_fini_pg(struct radeon_device *rdev)
{
if (rdev->pg_flags) {
si_enable_dma_pg(rdev, false);
si_enable_gfx_cgpg(rdev, false);
}
}
 
/*
* RLC
*/
void si_rlc_reset(struct radeon_device *rdev)
{
u32 tmp = RREG32(GRBM_SOFT_RESET);
 
tmp |= SOFT_RESET_RLC;
WREG32(GRBM_SOFT_RESET, tmp);
udelay(50);
tmp &= ~SOFT_RESET_RLC;
WREG32(GRBM_SOFT_RESET, tmp);
udelay(50);
}
 
static void si_rlc_stop(struct radeon_device *rdev)
{
WREG32(RLC_CNTL, 0);
 
si_enable_gui_idle_interrupt(rdev, false);
 
si_wait_for_rlc_serdes(rdev);
}
 
static void si_rlc_start(struct radeon_device *rdev)
{
WREG32(RLC_CNTL, RLC_ENABLE);
 
si_enable_gui_idle_interrupt(rdev, true);
 
udelay(50);
}
 
static bool si_lbpw_supported(struct radeon_device *rdev)
{
u32 tmp;
 
/* Enable LBPW only for DDR3 */
tmp = RREG32(MC_SEQ_MISC0);
if ((tmp & 0xF0000000) == 0xB0000000)
return true;
return false;
}
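
/* Example (illustrative): an MC_SEQ_MISC0 reading of 0xB0xxxxxx reports
 * DDR3, so load-balance per-watt gets enabled below; GDDR5 boards report a
 * different memory-type nibble and LBPW stays off. */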
 
static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
{
u32 tmp;
 
tmp = RREG32(RLC_LB_CNTL);
if (enable)
tmp |= LOAD_BALANCE_ENABLE;
else
tmp &= ~LOAD_BALANCE_ENABLE;
WREG32(RLC_LB_CNTL, tmp);
 
if (!enable) {
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(SPI_LB_CU_MASK, 0x00ff);
}
}
 
static int si_rlc_resume(struct radeon_device *rdev)
{
u32 i;
const __be32 *fw_data;
 
if (!rdev->rlc_fw)
return -EINVAL;
5806,47 → 4390,25
 
si_rlc_stop(rdev);
 
si_rlc_reset(rdev);
 
si_init_pg(rdev);
 
si_init_cg(rdev);
 
WREG32(RLC_RL_BASE, 0);
WREG32(RLC_RL_SIZE, 0);
WREG32(RLC_LB_CNTL, 0);
WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
 
WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
 
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
 
if (rdev->new_fw) {
const struct rlc_firmware_header_v1_0 *hdr =
(const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
const __le32 *fw_data = (const __le32 *)
(rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
radeon_ucode_print_rlc_hdr(&hdr->header);
 
for (i = 0; i < fw_size; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
}
} else {
const __be32 *fw_data =
(const __be32 *)rdev->rlc_fw->data;
fw_data = (const __be32 *)rdev->rlc_fw->data;
for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
}
WREG32(RLC_UCODE_ADDR, 0);
 
si_enable_lbpw(rdev, si_lbpw_supported(rdev));
 
si_rlc_start(rdev);
 
return 0;
5884,9 → 4446,7
{
u32 tmp;
 
tmp = RREG32(CP_INT_CNTL_RING0) &
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING0, tmp);
WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
5921,7 → 4481,7
}
 
if (!ASIC_IS_NODCE(rdev)) {
WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 
tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
5972,7 → 4532,7
WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
rb_bufsz = drm_order(rdev->ih.ring_size / 4);
 
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
6011,13 → 4571,13
 
int si_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl;
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 dma_cntl, dma_cntl1;
u32 thermal_int = 0;
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
6031,9 → 4591,6
return 0;
}
 
cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 
if (!ASIC_IS_NODCE(rdev)) {
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
6046,9 → 4603,6
dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
 
thermal_int = RREG32(CG_THERMAL_INT) &
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
 
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n");
6135,11 → 4689,6
 
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
if (rdev->irq.dpm_thermal) {
DRM_DEBUG("dpm thermal\n");
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
}
 
if (rdev->num_crtc >= 2) {
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
6154,22 → 4703,16
}
 
if (rdev->num_crtc >= 2) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
}
if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
}
if (rdev->num_crtc >= 6) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
GRPH_PFLIP_INT_MASK);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
}
 
if (!ASIC_IS_NODCE(rdev)) {
6181,8 → 4724,6
WREG32(DC_HPD6_INT_CONTROL, hpd6);
}
 
WREG32(CG_THERMAL_INT, thermal_int);
 
return 0;
}
 
6326,7 → 4867,6
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
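
/* ih.ptr_mask is the byte ring size minus one (set up by the
 * r600_ih_ring_init() call in si_init() below), so once RB_OVERFLOW has been
 * cleared a raw hardware write pointer just past the end of the 64 KiB ring
 * simply masks back to offset 0. */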
6348,8 → 4888,6
u32 src_id, src_data, ring_id;
u32 ring_index;
bool queue_hotplug = false;
bool queue_thermal = false;
u32 status, addr;
 
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
6534,14 → 5072,6
break;
}
break;
case 8: /* D1 page flip */
case 10: /* D2 page flip */
case 12: /* D3 page flip */
case 14: /* D4 page flip */
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
6591,24 → 5121,15
break;
}
break;
case 124: /* UVD */
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
/* reset addr and status */
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
if (addr == 0x0 && status == 0x0)
break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
addr);
RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
si_vm_decode_fault(rdev, status, addr);
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
/* reset addr and status */
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6637,16 → 5158,6
DRM_DEBUG("IH: DMA trap\n");
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
break;
case 230: /* thermal low to high */
DRM_DEBUG("IH: thermal low to high\n");
rdev->pm.dpm.thermal.high_to_low = false;
queue_thermal = true;
break;
case 231: /* thermal high to low */
DRM_DEBUG("IH: thermal high to low\n");
rdev->pm.dpm.thermal.high_to_low = true;
queue_thermal = true;
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
6677,6 → 5188,80
return IRQ_HANDLED;
}
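
/* Sketch of the 16-byte IH vectors consumed by the loop above (illustrative
 * field extraction, matching the src_id/src_data/ring_id locals):
 *
 *   src_id   = le32_to_cpu(rdev->ih.ring[ring_index + 0]) & 0xff;
 *   src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 *   ring_id  = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
 */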
 
/**
* si_copy_dma - copy pages using the DMA engine
*
* @rdev: radeon_device pointer
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
* @fence: radeon fence object
*
* Copy GPU pages using the DMA engine (SI).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
{
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes;
int i, num_loops;
int r = 0;
 
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
if (radeon_fence_need_sync(*fence, ring->idx)) {
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
ring->idx);
radeon_fence_note_sync(*fence, ring->idx);
} else {
radeon_semaphore_free(rdev, &sem, NULL);
}
 
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
if (cur_size_in_bytes > 0xFFFFF)
cur_size_in_bytes = 0xFFFFF;
size_in_bytes -= cur_size_in_bytes;
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
radeon_ring_write(ring, dst_offset & 0xffffffff);
radeon_ring_write(ring, src_offset & 0xffffffff);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
}
 
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring);
radeon_semaphore_free(rdev, &sem, *fence);
 
return r;
}
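
/* A minimal usage sketch (assumptions: both offsets already point at pinned,
 * DMA-visible memory, *fence follows the radeon_ttm calling convention, and
 * the helper name is hypothetical):
 */
static int example_dma_copy_16_pages(struct radeon_device *rdev,
				     uint64_t src, uint64_t dst,
				     struct radeon_fence **fence)
{
	/* 16 pages = 0x10000 bytes < 0xFFFFF, so num_loops = 1 and the
	 * radeon_ring_lock() above reserves 1 * 5 + 11 = 16 dwords */
	return si_copy_dma(rdev, src, dst, 16, fence);
}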
 
/*
* startup/shutdown callbacks
*/
6685,26 → 5270,26
struct radeon_ring *ring;
int r;
 
/* enable pcie gen2/3 link */
si_pcie_gen3_enable(rdev);
/* enable aspm */
si_program_aspm(rdev);
 
/* scratch needs to be initialized before MC */
r = r600_vram_scratch_init(rdev);
if (r)
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->rlc_fw || !rdev->mc_fw) {
r = si_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
 
si_mc_program(rdev);
 
if (!rdev->pm.dpm_enabled) {
r = si_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
si_mc_program(rdev);
r = si_pcie_gart_enable(rdev);
if (r)
return r;
6711,13 → 5296,7
si_gpu_init(rdev);
 
/* allocate rlc buffers */
if (rdev->family == CHIP_VERDE) {
rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
rdev->rlc.reg_list_size =
(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
}
rdev->rlc.cs_data = si_cs_data;
r = sumo_rlc_init(rdev);
r = si_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
6758,17 → 5337,6
return r;
}
 
if (rdev->has_uvd) {
r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
if (r)
dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
}
if (r)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
}
 
/* Enable IRQ */
if (!rdev->irq.installed) {
6787,31 → 5355,38
 
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_PACKET2);
CP_RB0_RPTR, CP_RB0_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
RADEON_CP_PACKET2);
CP_RB1_RPTR, CP_RB1_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
RADEON_CP_PACKET2);
CP_RB2_RPTR, CP_RB2_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
 
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
 
6826,17 → 5401,6
if (r)
return r;
 
if (rdev->has_uvd) {
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
RADEON_CP_PACKET2);
if (!r)
r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
}
 
r = radeon_ib_pool_init(rdev);
if (r) {
6850,10 → 5414,6
return r;
}
 
r = dce6_audio_init(rdev);
if (r)
return r;
 
return 0;
}
 
6871,6 → 5431,8
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
ENTER();
 
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
6917,18 → 5479,6
if (r)
return r;
 
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->rlc_fw || !rdev->mc_fw) {
r = si_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
 
/* Initialize power management */
radeon_pm_init(rdev);
 
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
6949,15 → 5499,6
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
 
if (rdev->has_uvd) {
r = radeon_uvd_init(rdev);
if (!r) {
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 4096);
}
}
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
 
6969,7 → 5510,7
r = si_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
si_cp_fini(rdev);
// si_cp_fini(rdev);
// si_irq_fini(rdev);
// si_rlc_fini(rdev);
// radeon_wb_fini(rdev);
6988,6 → 5529,7
DRM_ERROR("radeon: MC ucode required for NI+.\n");
return -EINVAL;
}
LEAVE();
 
return 0;
}
7012,6 → 5554,7
return clock;
}
 
#if 0
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
7102,364 → 5645,4
 
return 0;
}
 
static void si_pcie_gen3_enable(struct radeon_device *rdev)
{
struct pci_dev *root = rdev->pdev->bus->self;
int bridge_pos, gpu_pos;
u32 speed_cntl, mask, current_data_rate;
int ret, i;
u16 tmp16;
 
if (radeon_pcie_gen2 == 0)
return;
 
if (rdev->flags & RADEON_IS_IGP)
return;
 
if (!(rdev->flags & RADEON_IS_PCIE))
return;
 
ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
if (ret != 0)
return;
 
if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
return;
 
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
LC_CURRENT_DATA_RATE_SHIFT;
if (mask & DRM_PCIE_SPEED_80) {
if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
} else if (mask & DRM_PCIE_SPEED_50) {
if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
}
 
bridge_pos = pci_pcie_cap(root);
if (!bridge_pos)
return;
 
gpu_pos = pci_pcie_cap(rdev->pdev);
if (!gpu_pos)
return;
 
if (mask & DRM_PCIE_SPEED_80) {
/* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg;
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
 
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
 
tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
 
tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
 
tmp = RREG32_PCIE(PCIE_LC_STATUS1);
max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
 
if (current_lw < max_lw) {
tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
if (tmp & LC_RENEGOTIATION_SUPPORT) {
tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
}
}
 
for (i = 0; i < 10; i++) {
/* check status */
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
 
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
 
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
 
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_SET_QUIESCE;
WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
 
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp |= LC_REDO_EQ;
WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
 
mdelay(100);
 
/* linkctl */
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
 
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
 
/* linkctl2 */
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~((1 << 4) | (7 << 9));
tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
 
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~((1 << 4) | (7 << 9));
tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
 
tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
tmp &= ~LC_SET_QUIESCE;
WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
}
}
}
 
/* set the link speed */
speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf;
if (mask & DRM_PCIE_SPEED_80)
tmp16 |= 3; /* gen3 */
else if (mask & DRM_PCIE_SPEED_50)
tmp16 |= 2; /* gen2 */
else
tmp16 |= 1; /* gen1 */
pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
 
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
for (i = 0; i < rdev->usec_timeout; i++) {
speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
break;
udelay(1);
}
}
 
static void si_program_aspm(struct radeon_device *rdev)
{
u32 data, orig;
bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
bool disable_clkreq = false;
 
if (radeon_aspm == 0)
return;
 
if (!(rdev->flags & RADEON_IS_PCIE))
return;
 
orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
data &= ~LC_XMIT_N_FTS_MASK;
data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
if (orig != data)
WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
 
orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
data |= LC_GO_TO_RECOVERY;
if (orig != data)
WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
 
orig = data = RREG32_PCIE(PCIE_P_CNTL);
data |= P_IGNORE_EDB_ERR;
if (orig != data)
WREG32_PCIE(PCIE_P_CNTL, data);
 
orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
data |= LC_PMI_TO_L1_DIS;
if (!disable_l0s)
data |= LC_L0S_INACTIVITY(7);
 
if (!disable_l1) {
data |= LC_L1_INACTIVITY(7);
data &= ~LC_PMI_TO_L1_DIS;
if (orig != data)
WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
 
if (!disable_plloff_in_l1) {
bool clk_req_support;
 
orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
if (orig != data)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
 
orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
if (orig != data)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
 
orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
if (orig != data)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
 
orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
if (orig != data)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
 
if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
data &= ~PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
 
orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
data &= ~PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
 
orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
data &= ~PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
 
orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
data &= ~PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
 
orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
data &= ~PLL_RAMP_UP_TIME_0_MASK;
if (orig != data)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
 
orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
data &= ~PLL_RAMP_UP_TIME_1_MASK;
if (orig != data)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
 
orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
data &= ~PLL_RAMP_UP_TIME_2_MASK;
if (orig != data)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
 
orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
data &= ~PLL_RAMP_UP_TIME_3_MASK;
if (orig != data)
WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
}
orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
data &= ~LC_DYN_LANES_PWR_STATE_MASK;
data |= LC_DYN_LANES_PWR_STATE(3);
if (orig != data)
WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
 
orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
data |= LS2_EXIT_TIME(5);
if (orig != data)
WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
 
orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
data &= ~LS2_EXIT_TIME_MASK;
if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
data |= LS2_EXIT_TIME(5);
if (orig != data)
WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
 
if (!disable_clkreq) {
struct pci_dev *root = rdev->pdev->bus->self;
u32 lnkcap;
 
clk_req_support = false;
pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
clk_req_support = true;
} else {
clk_req_support = false;
}
 
if (clk_req_support) {
orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
if (orig != data)
WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
 
orig = data = RREG32(THM_CLK_CNTL);
data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
if (orig != data)
WREG32(THM_CLK_CNTL, data);
 
orig = data = RREG32(MISC_CLK_CNTL);
data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
if (orig != data)
WREG32(MISC_CLK_CNTL, data);
 
orig = data = RREG32(CG_CLKPIN_CNTL);
data &= ~BCLK_AS_XCLK;
if (orig != data)
WREG32(CG_CLKPIN_CNTL, data);
 
orig = data = RREG32(CG_CLKPIN_CNTL_2);
data &= ~FORCE_BIF_REFCLK_EN;
if (orig != data)
WREG32(CG_CLKPIN_CNTL_2, data);
 
orig = data = RREG32(MPLL_BYPASSCLK_SEL);
data &= ~MPLL_CLKOUT_SEL_MASK;
data |= MPLL_CLKOUT_SEL(4);
if (orig != data)
WREG32(MPLL_BYPASSCLK_SEL, data);
 
orig = data = RREG32(SPLL_CNTL_MODE);
data &= ~SPLL_REFCLK_SEL_MASK;
if (orig != data)
WREG32(SPLL_CNTL_MODE, data);
}
}
} else {
if (orig != data)
WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
}
 
orig = data = RREG32_PCIE(PCIE_CNTL2);
data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
if (orig != data)
WREG32_PCIE(PCIE_CNTL2, data);
 
if (!disable_l0s) {
data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
data = RREG32_PCIE(PCIE_LC_STATUS1);
if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
data &= ~LC_L0S_INACTIVITY_MASK;
if (orig != data)
WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
}
}
}
}
#endif
/drivers/video/drm/radeon/sid.h
30,99 → 30,6
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
 
#define SI_MAX_SH_GPRS 256
#define SI_MAX_TEMP_GPRS 16
#define SI_MAX_SH_THREADS 256
#define SI_MAX_SH_STACK_ENTRIES 4096
#define SI_MAX_FRC_EOV_CNT 16384
#define SI_MAX_BACKENDS 8
#define SI_MAX_BACKENDS_MASK 0xFF
#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
#define SI_MAX_SIMDS 12
#define SI_MAX_SIMDS_MASK 0x0FFF
#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
#define SI_MAX_PIPES 8
#define SI_MAX_PIPES_MASK 0xFF
#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
#define SI_MAX_LDS_NUM 0xFFFF
#define SI_MAX_TCC 16
#define SI_MAX_TCC_MASK 0xFFFF
 
/* SMC IND accessor regs */
#define SMC_IND_INDEX_0 0x200
#define SMC_IND_DATA_0 0x204
 
#define SMC_IND_ACCESS_CNTL 0x228
# define AUTO_INCREMENT_IND_0 (1 << 0)
#define SMC_MESSAGE_0 0x22c
#define SMC_RESP_0 0x230
 
/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
#define SMC_CG_IND_START 0xc0030000
#define SMC_CG_IND_END 0xc0040000
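/* A minimal sketch of how the CG indirect space is reached through the SMC
 * index/data pair above, assuming the driver's usual WREG32/RREG32 MMIO
 * accessors; the helper name is hypothetical and locking is omitted: */
static u32 cg_cgind_rreg(struct radeon_device *rdev, u32 reg)
{
WREG32(SMC_IND_INDEX_0, SMC_CG_IND_START + reg);
return RREG32(SMC_IND_DATA_0);
}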
 
#define CG_CGTT_LOCAL_0 0x400
#define CG_CGTT_LOCAL_1 0x401
 
/* SMC IND registers */
#define SMC_SYSCON_RESET_CNTL 0x80000000
# define RST_REG (1 << 0)
#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
# define CK_DISABLE (1 << 0)
# define CKEN (1 << 24)
 
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
 
#define DCCG_DISP_SLOW_SELECT_REG 0x4fc
#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
 
#define CG_SPLL_FUNC_CNTL 0x600
#define SPLL_RESET (1 << 0)
#define SPLL_SLEEP (1 << 1)
#define SPLL_BYPASS_EN (1 << 3)
#define SPLL_REF_DIV(x) ((x) << 4)
#define SPLL_REF_DIV_MASK (0x3f << 4)
#define SPLL_PDIV_A(x) ((x) << 20)
#define SPLL_PDIV_A_MASK (0x7f << 20)
#define SPLL_PDIV_A_SHIFT 20
#define CG_SPLL_FUNC_CNTL_2 0x604
#define SCLK_MUX_SEL(x) ((x) << 0)
#define SCLK_MUX_SEL_MASK (0x1ff << 0)
#define SPLL_CTLREQ_CHG (1 << 23)
#define SCLK_MUX_UPDATE (1 << 26)
#define CG_SPLL_FUNC_CNTL_3 0x608
#define SPLL_FB_DIV(x) ((x) << 0)
#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
#define SPLL_FB_DIV_SHIFT 0
#define SPLL_DITHEN (1 << 28)
#define CG_SPLL_FUNC_CNTL_4 0x60c
 
#define SPLL_STATUS 0x614
#define SPLL_CHG_STATUS (1 << 1)
#define SPLL_CNTL_MODE 0x618
#define SPLL_SW_DIR_CONTROL (1 << 0)
# define SPLL_REFCLK_SEL(x) ((x) << 26)
# define SPLL_REFCLK_SEL_MASK (3 << 26)
 
#define CG_SPLL_SPREAD_SPECTRUM 0x620
#define SSEN (1 << 0)
#define CLK_S(x) ((x) << 4)
#define CLK_S_MASK (0xfff << 4)
#define CLK_S_SHIFT 4
#define CG_SPLL_SPREAD_SPECTRUM_2 0x624
#define CLK_V(x) ((x) << 0)
#define CLK_V_MASK (0x3ffffff << 0)
#define CLK_V_SHIFT 0
 
#define CG_SPLL_AUTOSCALE_CNTL 0x62c
# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
 
/* discrete uvd clocks */
#define CG_UPLL_FUNC_CNTL 0x634
# define UPLL_RESET_MASK 0x00000001
152,45 → 59,6
#define CG_UPLL_SPREAD_SPECTRUM 0x650
# define SSEN_MASK 0x00000001
 
#define MPLL_BYPASSCLK_SEL 0x65c
# define MPLL_CLKOUT_SEL(x) ((x) << 8)
# define MPLL_CLKOUT_SEL_MASK 0xFF00
 
#define CG_CLKPIN_CNTL 0x660
# define XTALIN_DIVIDE (1 << 1)
# define BCLK_AS_XCLK (1 << 2)
#define CG_CLKPIN_CNTL_2 0x664
# define FORCE_BIF_REFCLK_EN (1 << 3)
# define MUX_TCLK_TO_XCLK (1 << 8)
 
#define THM_CLK_CNTL 0x66c
# define CMON_CLK_SEL(x) ((x) << 0)
# define CMON_CLK_SEL_MASK 0xFF
# define TMON_CLK_SEL(x) ((x) << 8)
# define TMON_CLK_SEL_MASK 0xFF00
#define MISC_CLK_CNTL 0x670
# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
# define ZCLK_SEL(x) ((x) << 8)
# define ZCLK_SEL_MASK 0xFF00
 
#define CG_THERMAL_CTRL 0x700
#define DPM_EVENT_SRC(x) ((x) << 0)
#define DPM_EVENT_SRC_MASK (7 << 0)
#define DIG_THERM_DPM(x) ((x) << 14)
#define DIG_THERM_DPM_MASK 0x003FC000
#define DIG_THERM_DPM_SHIFT 14
 
#define CG_THERMAL_INT 0x708
#define DIG_THERM_INTH(x) ((x) << 8)
#define DIG_THERM_INTH_MASK 0x0000FF00
#define DIG_THERM_INTH_SHIFT 8
#define DIG_THERM_INTL(x) ((x) << 16)
#define DIG_THERM_INTL_MASK 0x00FF0000
#define DIG_THERM_INTL_SHIFT 16
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
 
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
199,98 → 67,36
#define CTF_TEMP_MASK 0x0003fe00
#define CTF_TEMP_SHIFT 9
 
#define GENERAL_PWRMGT 0x780
# define GLOBAL_PWRMGT_EN (1 << 0)
# define STATIC_PM_EN (1 << 1)
# define THERMAL_PROTECTION_DIS (1 << 2)
# define THERMAL_PROTECTION_TYPE (1 << 3)
# define SW_SMIO_INDEX(x) ((x) << 6)
# define SW_SMIO_INDEX_MASK (1 << 6)
# define SW_SMIO_INDEX_SHIFT 6
# define VOLT_PWRMGT_EN (1 << 10)
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
#define CG_TPC 0x784
#define SCLK_PWRMGT_CNTL 0x788
# define SCLK_PWRMGT_OFF (1 << 0)
# define SCLK_LOW_D1 (1 << 1)
# define FIR_RESET (1 << 4)
# define FIR_FORCE_TREND_SEL (1 << 5)
# define FIR_TREND_MODE (1 << 6)
# define DYN_GFX_CLK_OFF_EN (1 << 7)
# define GFX_CLK_FORCE_ON (1 << 8)
# define GFX_CLK_REQUEST_OFF (1 << 9)
# define GFX_CLK_FORCE_OFF (1 << 10)
# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
# define DYN_LIGHT_SLEEP_EN (1 << 14)
#define SI_MAX_SH_GPRS 256
#define SI_MAX_TEMP_GPRS 16
#define SI_MAX_SH_THREADS 256
#define SI_MAX_SH_STACK_ENTRIES 4096
#define SI_MAX_FRC_EOV_CNT 16384
#define SI_MAX_BACKENDS 8
#define SI_MAX_BACKENDS_MASK 0xFF
#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
#define SI_MAX_SIMDS 12
#define SI_MAX_SIMDS_MASK 0x0FFF
#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
#define SI_MAX_PIPES 8
#define SI_MAX_PIPES_MASK 0xFF
#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
#define SI_MAX_LDS_NUM 0xFFFF
#define SI_MAX_TCC 16
#define SI_MAX_TCC_MASK 0xFFFF
 
#define TARGET_AND_CURRENT_PROFILE_INDEX 0x798
# define CURRENT_STATE_INDEX_MASK (0xf << 4)
# define CURRENT_STATE_INDEX_SHIFT 4
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
 
#define CG_FTV 0x7bc
#define CG_CLKPIN_CNTL 0x660
# define XTALIN_DIVIDE (1 << 1)
#define CG_CLKPIN_CNTL_2 0x664
# define MUX_TCLK_TO_XCLK (1 << 8)
 
#define CG_FFCT_0 0x7c0
# define UTC_0(x) ((x) << 0)
# define UTC_0_MASK (0x3ff << 0)
# define DTC_0(x) ((x) << 10)
# define DTC_0_MASK (0x3ff << 10)
 
#define CG_BSP 0x7fc
# define BSP(x) ((x) << 0)
# define BSP_MASK (0xffff << 0)
# define BSU(x) ((x) << 16)
# define BSU_MASK (0xf << 16)
#define CG_AT 0x800
# define CG_R(x) ((x) << 0)
# define CG_R_MASK (0xffff << 0)
# define CG_L(x) ((x) << 16)
# define CG_L_MASK (0xffff << 16)
 
#define CG_GIT 0x804
# define CG_GICST(x) ((x) << 0)
# define CG_GICST_MASK (0xffff << 0)
# define CG_GIPOT(x) ((x) << 16)
# define CG_GIPOT_MASK (0xffff << 16)
 
#define CG_SSP 0x80c
# define SST(x) ((x) << 0)
# define SST_MASK (0xffff << 0)
# define SSTU(x) ((x) << 16)
# define SSTU_MASK (0xf << 16)
 
#define CG_DISPLAY_GAP_CNTL 0x828
# define DISP1_GAP(x) ((x) << 0)
# define DISP1_GAP_MASK (3 << 0)
# define DISP2_GAP(x) ((x) << 2)
# define DISP2_GAP_MASK (3 << 2)
# define VBI_TIMER_COUNT(x) ((x) << 4)
# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
# define VBI_TIMER_UNIT(x) ((x) << 20)
# define VBI_TIMER_UNIT_MASK (7 << 20)
# define DISP1_GAP_MCHG(x) ((x) << 24)
# define DISP1_GAP_MCHG_MASK (3 << 24)
# define DISP2_GAP_MCHG(x) ((x) << 26)
# define DISP2_GAP_MCHG_MASK (3 << 26)
 
#define CG_ULV_CONTROL 0x878
#define CG_ULV_PARAMETER 0x87c
 
#define SMC_SCRATCH0 0x884
 
#define CG_CAC_CTRL 0x8b8
# define CAC_WINDOW(x) ((x) << 0)
# define CAC_WINDOW_MASK 0x00ffffff
 
#define DMIF_ADDR_CONFIG 0xBD4
 
#define DMIF_ADDR_CALC 0xC00
 
#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
 
#define SRBM_STATUS 0xE50
#define GRBM_RQ_PENDING (1 << 5)
#define VMC_BUSY (1 << 8)
362,7 → 168,6
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
377,20 → 182,6
 
#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
#define PROTECTIONS_MASK (0xf << 0)
#define PROTECTIONS_SHIFT 0
/* bit 0: range
* bit 1: pde0
* bit 2: valid
* bit 3: read
* bit 4: write
*/
#define MEMORY_CLIENT_ID_MASK (0xff << 12)
#define MEMORY_CLIENT_ID_SHIFT 12
#define MEMORY_CLIENT_RW_MASK (1 << 24)
#define MEMORY_CLIENT_RW_SHIFT 24
#define FAULT_VMID_MASK (0xf << 25)
#define FAULT_VMID_SHIFT 25
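/* A minimal sketch of decoding a VM_CONTEXT1_PROTECTION_FAULT_STATUS value
 * with the masks above; 'status' would come from RREG32() and the variable
 * names are hypothetical: */
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
u32 client_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
u32 client_rw = (status & MEMORY_CLIENT_RW_MASK) >> MEMORY_CLIENT_RW_SHIFT;
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;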
 
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
412,10 → 203,6
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
 
#define VM_L2_CG 0x15c0
#define MC_CG_ENABLE (1 << 18)
#define MC_LS_ENABLE (1 << 19)
 
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x0000f000
441,17 → 228,6
 
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
 
#define MC_HUB_MISC_HUB_CG 0x20b8
#define MC_HUB_MISC_VM_CG 0x20bc
 
#define MC_HUB_MISC_SIP_CG 0x20c0
 
#define MC_XPB_CLK_GAT 0x2478
 
#define MC_CITF_MISC_RD_CG 0x2648
#define MC_CITF_MISC_WR_CG 0x264c
#define MC_CITF_MISC_VM_CG 0x2650
 
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
467,24 → 243,7
#define NOOFGROUPS_SHIFT 12
#define NOOFGROUPS_MASK 0x00001000
 
#define MC_ARB_DRAM_TIMING 0x2774
#define MC_ARB_DRAM_TIMING2 0x2778
 
#define MC_ARB_BURST_TIME 0x2808
#define STATE0(x) ((x) << 0)
#define STATE0_MASK (0x1f << 0)
#define STATE0_SHIFT 0
#define STATE1(x) ((x) << 5)
#define STATE1_MASK (0x1f << 5)
#define STATE1_SHIFT 5
#define STATE2(x) ((x) << 10)
#define STATE2_MASK (0x1f << 10)
#define STATE2_SHIFT 10
#define STATE3(x) ((x) << 15)
#define STATE3_MASK (0x1f << 15)
#define STATE3_SHIFT 15
 
#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808
#define TRAIN_DONE_D0 (1 << 30)
#define TRAIN_DONE_D1 (1 << 31)
 
491,109 → 250,14
#define MC_SEQ_SUP_CNTL 0x28c8
#define RUN_MASK (1 << 0)
#define MC_SEQ_SUP_PGM 0x28cc
#define MC_PMG_AUTO_CMD 0x28d0
 
#define MC_IO_PAD_CNTL_D0 0x29d0
#define MEM_FALL_OUT_CMD (1 << 8)
 
#define MC_SEQ_RAS_TIMING 0x28a0
#define MC_SEQ_CAS_TIMING 0x28a4
#define MC_SEQ_MISC_TIMING 0x28a8
#define MC_SEQ_MISC_TIMING2 0x28ac
#define MC_SEQ_PMG_TIMING 0x28b0
#define MC_SEQ_RD_CTL_D0 0x28b4
#define MC_SEQ_RD_CTL_D1 0x28b8
#define MC_SEQ_WR_CTL_D0 0x28bc
#define MC_SEQ_WR_CTL_D1 0x28c0
 
#define MC_SEQ_MISC0 0x2a00
#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
#define MC_SEQ_MISC0_VEN_ID_VALUE 3
#define MC_SEQ_MISC0_REV_ID_SHIFT 12
#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
#define MC_SEQ_MISC0_REV_ID_VALUE 1
#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5
#define MC_SEQ_MISC1 0x2a04
#define MC_SEQ_RESERVE_M 0x2a08
#define MC_PMG_CMD_EMRS 0x2a0c
 
#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
#define MC_SEQ_IO_DEBUG_DATA 0x2a48
 
#define MC_SEQ_MISC5 0x2a54
#define MC_SEQ_MISC6 0x2a58
 
#define MC_SEQ_MISC7 0x2a64
 
#define MC_SEQ_RAS_TIMING_LP 0x2a6c
#define MC_SEQ_CAS_TIMING_LP 0x2a70
#define MC_SEQ_MISC_TIMING_LP 0x2a74
#define MC_SEQ_MISC_TIMING2_LP 0x2a78
#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
#define MC_SEQ_WR_CTL_D1_LP 0x2a80
#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
 
#define MC_PMG_CMD_MRS 0x2aac
 
#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
#define MC_SEQ_RD_CTL_D1_LP 0x2b20
 
#define MC_PMG_CMD_MRS1 0x2b44
#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
#define MC_SEQ_PMG_TIMING_LP 0x2b4c
 
#define MC_SEQ_WR_CTL_2 0x2b54
#define MC_SEQ_WR_CTL_2_LP 0x2b58
#define MC_PMG_CMD_MRS2 0x2b5c
#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
 
#define MCLK_PWRMGT_CNTL 0x2ba0
# define DLL_SPEED(x) ((x) << 0)
# define DLL_SPEED_MASK (0x1f << 0)
# define DLL_READY (1 << 6)
# define MC_INT_CNTL (1 << 7)
# define MRDCK0_PDNB (1 << 8)
# define MRDCK1_PDNB (1 << 9)
# define MRDCK0_RESET (1 << 16)
# define MRDCK1_RESET (1 << 17)
# define DLL_READY_READ (1 << 24)
#define DLL_CNTL 0x2ba4
# define MRDCK0_BYPASS (1 << 24)
# define MRDCK1_BYPASS (1 << 25)
 
#define MPLL_CNTL_MODE 0x2bb0
# define MPLL_MCLK_SEL (1 << 11)
#define MPLL_FUNC_CNTL 0x2bb4
#define BWCTRL(x) ((x) << 20)
#define BWCTRL_MASK (0xff << 20)
#define MPLL_FUNC_CNTL_1 0x2bb8
#define VCO_MODE(x) ((x) << 0)
#define VCO_MODE_MASK (3 << 0)
#define CLKFRAC(x) ((x) << 4)
#define CLKFRAC_MASK (0xfff << 4)
#define CLKF(x) ((x) << 16)
#define CLKF_MASK (0xfff << 16)
#define MPLL_FUNC_CNTL_2 0x2bbc
#define MPLL_AD_FUNC_CNTL 0x2bc0
#define YCLK_POST_DIV(x) ((x) << 0)
#define YCLK_POST_DIV_MASK (7 << 0)
#define MPLL_DQ_FUNC_CNTL 0x2bc4
#define YCLK_SEL(x) ((x) << 4)
#define YCLK_SEL_MASK (1 << 4)
 
#define MPLL_SS1 0x2bcc
#define CLKV(x) ((x) << 0)
#define CLKV_MASK (0x3ffffff << 0)
#define MPLL_SS2 0x2bd0
#define CLKS(x) ((x) << 0)
#define CLKS_MASK (0xfff << 0)
 
#define HDP_HOST_PATH_CNTL 0x2C00
#define CLOCK_GATING_DIS (1 << 23)
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
601,11 → 265,7
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
#define HDP_MEM_POWER_LS 0x2F50
#define HDP_LS_ENABLE (1 << 0)
 
#define ATC_MISC_CG 0x3350
 
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
650,99 → 310,6
 
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
 
/* DCE6 ELD audio interface */
#define AZ_F0_CODEC_ENDPOINT_INDEX 0x5E00
# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
#define AZ_F0_CODEC_ENDPOINT_DATA 0x5E04
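/* A minimal sketch of writing one endpoint register through the index/data
 * pair above, assuming the driver's usual WREG32 accessor; the helper name
 * and 'offset' (selecting the codec pin block) are hypothetical: */
static void az_endpoint_wreg(struct radeon_device *rdev, u32 offset,
u32 reg, u32 v)
{
WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + offset,
AZ_ENDPOINT_REG_INDEX(reg) | AZ_ENDPOINT_REG_WRITE_EN);
WREG32(AZ_F0_CODEC_ENDPOINT_DATA + offset, v);
}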
 
#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
#define SPEAKER_ALLOCATION_SHIFT 0
#define HDMI_CONNECTION (1 << 16)
#define DP_CONNECTION (1 << 17)
 
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
/* max channels minus one. 7 = 8 channels */
# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
* bit0 = 32 kHz
* bit1 = 44.1 kHz
* bit2 = 48 kHz
* bit3 = 88.2 kHz
* bit4 = 96 kHz
* bit5 = 176.4 kHz
* bit6 = 192 kHz
*/
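/* A minimal sketch of building an LPCM descriptor from the fields above:
 * 8 channels (the field holds channels minus one) supporting 32/44.1/48 kHz
 * (bits 0-2 of the frequency fields). The value is a made-up example: */
u32 desc = MAX_CHANNELS(7) |
SUPPORTED_FREQUENCIES(0x07) |
SUPPORTED_FREQUENCIES_STEREO(0x07);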
 
#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
* 0 = invalid
* x = legal delay value
* 255 = sync not supported
*/
#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
# define HBR_CAPABLE (1 << 0) /* enabled by default */
 
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
# define DESCRIPTION0(x) (((x) & 0xff) << 0)
# define DESCRIPTION1(x) (((x) & 0xff) << 8)
# define DESCRIPTION2(x) (((x) & 0xff) << 16)
# define DESCRIPTION3(x) (((x) & 0xff) << 24)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
# define DESCRIPTION4(x) (((x) & 0xff) << 0)
# define DESCRIPTION5(x) (((x) & 0xff) << 8)
# define DESCRIPTION6(x) (((x) & 0xff) << 16)
# define DESCRIPTION7(x) (((x) & 0xff) << 24)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
# define DESCRIPTION8(x) (((x) & 0xff) << 0)
# define DESCRIPTION9(x) (((x) & 0xff) << 8)
# define DESCRIPTION10(x) (((x) & 0xff) << 16)
# define DESCRIPTION11(x) (((x) & 0xff) << 24)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
# define DESCRIPTION12(x) (((x) & 0xff) << 0)
# define DESCRIPTION13(x) (((x) & 0xff) << 8)
# define DESCRIPTION14(x) (((x) & 0xff) << 16)
# define DESCRIPTION15(x) (((x) & 0xff) << 24)
#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
# define DESCRIPTION16(x) (((x) & 0xff) << 0)
# define DESCRIPTION17(x) (((x) & 0xff) << 8)
 
#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
# define AUDIO_ENABLED (1 << 31)
 
#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
#define PORT_CONNECTIVITY_MASK (3 << 30)
#define PORT_CONNECTIVITY_SHIFT 30
 
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
 
823,7 → 390,7
# define GRPH_PFLIP_INT_MASK (1 << 0)
# define GRPH_PFLIP_INT_TYPE (1 << 8)
 
#define DAC_AUTODETECT_INT_CONTROL 0x67c8
#define DACA_AUTODETECT_INT_CONTROL 0x66c8
 
#define DC_HPD1_INT_STATUS 0x601c
#define DC_HPD2_INT_STATUS 0x6028
857,23 → 424,9
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
 
#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
# define STUTTER_ENABLE (1 << 0)
 
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
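/* A minimal sketch of indexing the six per-CRTC instances listed above;
 * relative to 0x6e98 the deltas work out to 0x0, 0xc00, 0x9800, 0xa400,
 * 0xb000 and 0xbc00. Table and helper names are hypothetical: */
static const u32 crtc_reg_offsets[] = {
0x0, 0xc00, 0x9800, 0xa400, 0xb000, 0xbc00
};

u32 read_frame_count(struct radeon_device *rdev, int crtc)
{
return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_reg_offsets[crtc]);
}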
 
#define AFMT_AUDIO_SRC_CONTROL 0x713c
#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
/* AFMT_AUDIO_SRC_SELECT
* 0 = stream0
* 1 = stream1
* 2 = stream2
* 3 = stream3
* 4 = stream4
* 5 = stream5
*/
 
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
 
1046,24 → 599,6
 
#define SQC_CACHES 0x8C08
 
#define SQ_POWER_THROTTLE 0x8e58
#define MIN_POWER(x) ((x) << 0)
#define MIN_POWER_MASK (0x3fff << 0)
#define MIN_POWER_SHIFT 0
#define MAX_POWER(x) ((x) << 16)
#define MAX_POWER_MASK (0x3fff << 16)
#define MAX_POWER_SHIFT 0
#define SQ_POWER_THROTTLE2 0x8e5c
#define MAX_POWER_DELTA(x) ((x) << 0)
#define MAX_POWER_DELTA_MASK (0x3fff << 0)
#define MAX_POWER_DELTA_SHIFT 0
#define STI_SIZE(x) ((x) << 16)
#define STI_SIZE_MASK (0x3ff << 16)
#define STI_SIZE_SHIFT 16
#define LTI_RATIO(x) ((x) << 27)
#define LTI_RATIO_MASK (0xf << 27)
#define LTI_RATIO_SHIFT 27
 
#define SX_DEBUG_1 0x9060
 
#define SPI_STATIC_THREAD_MGMT_1 0x90E0
1081,12 → 616,7
#define CGTS_USER_TCC_DISABLE 0x914C
#define TCC_DISABLE_MASK 0xFFFF0000
#define TCC_DISABLE_SHIFT 16
#define CGTS_SM_CTRL_REG 0x9150
#define OVERRIDE (1 << 21)
#define LS_OVERRIDE (1 << 22)
 
#define SPI_LB_CU_MASK 0x9354
 
#define TA_CNTL_AUX 0x9508
 
#define CC_RB_BACKEND_DISABLE 0x98F4
1175,8 → 705,6
#define CB_PERFCOUNTER3_SELECT0 0x9a38
#define CB_PERFCOUNTER3_SELECT1 0x9a3c
 
#define CB_CGTT_SCLK_CTRL 0x9a60
 
#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
#define BACKEND_DISABLE_MASK 0x00FF0000
#define BACKEND_DISABLE_SHIFT 16
1234,9 → 762,6
# define CP_RINGID1_INT_STAT (1 << 30)
# define CP_RINGID0_INT_STAT (1 << 31)
 
#define CP_MEM_SLP_CNTL 0xC1E4
# define CP_MEM_LS_EN (1 << 0)
 
#define CP_DEBUG 0xC1FC
 
#define RLC_CNTL 0xC300
1244,7 → 769,6
#define RLC_RL_BASE 0xC304
#define RLC_RL_SIZE 0xC308
#define RLC_LB_CNTL 0xC30C
# define LOAD_BALANCE_ENABLE (1 << 0)
#define RLC_SAVE_AND_RESTORE_BASE 0xC310
#define RLC_LB_CNTR_MAX 0xC314
#define RLC_LB_CNTR_INIT 0xC318
1259,57 → 783,7
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348
#define RLC_STAT 0xC34C
# define RLC_BUSY_STATUS (1 << 0)
# define GFX_POWER_STATUS (1 << 1)
# define GFX_CLOCK_STATUS (1 << 2)
# define GFX_LS_STATUS (1 << 3)
 
#define RLC_PG_CNTL 0xC35C
# define GFX_PG_ENABLE (1 << 0)
# define GFX_PG_SRC (1 << 1)
 
#define RLC_CGTT_MGCG_OVERRIDE 0xC400
#define RLC_CGCG_CGLS_CTRL 0xC404
# define CGCG_EN (1 << 0)
# define CGLS_EN (1 << 1)
 
#define RLC_TTOP_D 0xC414
# define RLC_PUD(x) ((x) << 0)
# define RLC_PUD_MASK (0xff << 0)
# define RLC_PDD(x) ((x) << 8)
# define RLC_PDD_MASK (0xff << 8)
# define RLC_TTPD(x) ((x) << 16)
# define RLC_TTPD_MASK (0xff << 16)
# define RLC_MSD(x) ((x) << 24)
# define RLC_MSD_MASK (0xff << 24)
 
#define RLC_LB_INIT_CU_MASK 0xC41C
 
#define RLC_PG_AO_CU_MASK 0xC42C
#define RLC_MAX_PG_CU 0xC430
# define MAX_PU_CU(x) ((x) << 0)
# define MAX_PU_CU_MASK (0xff << 0)
#define RLC_AUTO_PG_CTRL 0xC434
# define AUTO_PG_EN (1 << 0)
# define GRBM_REG_SGIT(x) ((x) << 3)
# define GRBM_REG_SGIT_MASK (0xffff << 3)
# define PG_AFTER_GRBM_REG_ST(x) ((x) << 19)
# define PG_AFTER_GRBM_REG_ST_MASK (0x1fff << 19)
 
#define RLC_SERDES_WR_MASTER_MASK_0 0xC454
#define RLC_SERDES_WR_MASTER_MASK_1 0xC458
#define RLC_SERDES_WR_CTRL 0xC45C
 
#define RLC_SERDES_MASTER_BUSY_0 0xC464
#define RLC_SERDES_MASTER_BUSY_1 0xC468
 
#define RLC_GCPM_GENERAL_3 0xC478
 
#define DB_RENDER_CONTROL 0x28000
 
#define DB_DEPTH_INFO 0x2803c
 
#define PA_SC_RASTER_CONFIG 0x28350
# define RASTER_CONFIG_RB_MAP_0 0
# define RASTER_CONFIG_RB_MAP_1 1
1355,147 → 829,6
# define THREAD_TRACE_FLUSH (54 << 0)
# define THREAD_TRACE_FINISH (55 << 0)
 
/* PIF PHY0 registers idx/data 0x8/0xc */
#define PB0_PIF_CNTL 0x10
# define LS2_EXIT_TIME(x) ((x) << 17)
# define LS2_EXIT_TIME_MASK (0x7 << 17)
# define LS2_EXIT_TIME_SHIFT 17
#define PB0_PIF_PAIRING 0x11
# define MULTI_PIF (1 << 25)
#define PB0_PIF_PWRDOWN_0 0x12
# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
# define PLL_RAMP_UP_TIME_0_SHIFT 24
#define PB0_PIF_PWRDOWN_1 0x13
# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
# define PLL_RAMP_UP_TIME_1_SHIFT 24
 
#define PB0_PIF_PWRDOWN_2 0x17
# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
# define PLL_RAMP_UP_TIME_2_SHIFT 24
#define PB0_PIF_PWRDOWN_3 0x18
# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
# define PLL_RAMP_UP_TIME_3_SHIFT 24
/* PIF PHY1 registers idx/data 0x10/0x14 */
#define PB1_PIF_CNTL 0x10
#define PB1_PIF_PAIRING 0x11
#define PB1_PIF_PWRDOWN_0 0x12
#define PB1_PIF_PWRDOWN_1 0x13
 
#define PB1_PIF_PWRDOWN_2 0x17
#define PB1_PIF_PWRDOWN_3 0x18
/* PCIE registers idx/data 0x30/0x34 */
#define PCIE_CNTL2 0x1c /* PCIE */
# define SLV_MEM_LS_EN (1 << 16)
# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
# define MST_MEM_LS_EN (1 << 18)
# define REPLAY_MEM_LS_EN (1 << 19)
#define PCIE_LC_STATUS1 0x28 /* PCIE */
# define LC_REVERSE_RCVR (1 << 0)
# define LC_REVERSE_XMIT (1 << 1)
# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
# define LC_OPERATING_LINK_WIDTH_SHIFT 2
# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
# define LC_DETECTED_LINK_WIDTH_SHIFT 5
 
#define PCIE_P_CNTL 0x40 /* PCIE */
# define P_IGNORE_EDB_ERR (1 << 6)
 
/* PCIE PORT registers idx/data 0x38/0x3c */
#define PCIE_LC_CNTL 0xa0
# define LC_L0S_INACTIVITY(x) ((x) << 8)
# define LC_L0S_INACTIVITY_MASK (0xf << 8)
# define LC_L0S_INACTIVITY_SHIFT 8
# define LC_L1_INACTIVITY(x) ((x) << 12)
# define LC_L1_INACTIVITY_MASK (0xf << 12)
# define LC_L1_INACTIVITY_SHIFT 12
# define LC_PMI_TO_L1_DIS (1 << 16)
# define LC_ASPM_TO_L1_DIS (1 << 24)
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_SHIFT 0
# define LC_LINK_WIDTH_MASK 0x7
# define LC_LINK_WIDTH_X0 0
# define LC_LINK_WIDTH_X1 1
# define LC_LINK_WIDTH_X2 2
# define LC_LINK_WIDTH_X4 3
# define LC_LINK_WIDTH_X8 4
# define LC_LINK_WIDTH_X16 6
# define LC_LINK_WIDTH_RD_SHIFT 4
# define LC_LINK_WIDTH_RD_MASK 0x70
# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
# define LC_RECONFIG_NOW (1 << 8)
# define LC_RENEGOTIATION_SUPPORT (1 << 9)
# define LC_RENEGOTIATE_EN (1 << 10)
# define LC_SHORT_RECONFIG_EN (1 << 11)
# define LC_UPCONFIGURE_SUPPORT (1 << 12)
# define LC_UPCONFIGURE_DIS (1 << 13)
# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
# define LC_DYN_LANES_PWR_STATE_SHIFT 21
#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
# define LC_XMIT_N_FTS(x) ((x) << 0)
# define LC_XMIT_N_FTS_MASK (0xff << 0)
# define LC_XMIT_N_FTS_SHIFT 0
# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
# define LC_N_FTS_MASK (0xff << 24)
#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
# define LC_GEN2_EN_STRAP (1 << 0)
# define LC_GEN3_EN_STRAP (1 << 1)
# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
# define LC_CURRENT_DATA_RATE_SHIFT 13
# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
 
#define PCIE_LC_CNTL2 0xb1
# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
 
#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
# define LC_GO_TO_RECOVERY (1 << 30)
#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
# define LC_REDO_EQ (1 << 5)
# define LC_SET_QUIESCE (1 << 13)
 
/*
* UVD
*/
1505,21 → 838,6
#define UVD_RBC_RB_RPTR 0xF690
#define UVD_RBC_RB_WPTR 0xF694
 
#define UVD_CGC_CTRL 0xF4B0
# define DCM (1 << 0)
# define CG_DT(x) ((x) << 2)
# define CG_DT_MASK (0xf << 2)
# define CLK_OD(x) ((x) << 6)
# define CLK_OD_MASK (0x1f << 6)
 
/* UVD CTX indirect */
#define UVD_CGC_MEM_CTRL 0xC0
#define UVD_CGC_CTRL2 0xC1
# define DYN_OR_EN (1 << 0)
# define DYN_RR_EN (1 << 1)
# define G_DIV_ID(x) ((x) << 2)
# define G_DIV_ID_MASK (0x7 << 2)
 
/*
* PM4
*/
1606,7 → 924,7
* 6. COMMAND [30:21] | BYTE_COUNT [20:0]
*/
# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
/* 0 - DST_ADDR
/* 0 - SRC_ADDR
* 1 - GDS
*/
# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
1621,7 → 939,7
# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
/* COMMAND */
# define PACKET3_CP_DMA_DIS_WC (1 << 21)
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
/* 0 - none
* 1 - 8 in 16
* 2 - 8 in 32
1764,15 → 1082,6
# define DMA_IDLE (1 << 0)
#define DMA_TILING_CONFIG 0xd0b8
 
#define DMA_POWER_CNTL 0xd0bc
# define MEM_POWER_OVERRIDE (1 << 8)
#define DMA_CLK_CTRL 0xd0c0
 
#define DMA_PG 0xd0d4
# define PG_CNTL_ENABLE (1 << 0)
#define DMA_PGFSM_CONFIG 0xd0d8
#define DMA_PGFSM_WRITE 0xd0dc
 
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
(((b) & 0x1) << 26) | \
(((t) & 0x1) << 23) | \
1799,51 → 1108,4
#define DMA_PACKET_CONSTANT_FILL 0xd
#define DMA_PACKET_NOP 0xf
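/* A minimal sketch of the macro in use: the DMA ring-init calls earlier in
 * this diff pad the ring with a NOP header, whose opcode lands in bits
 * 31:28 of the word: */
u32 nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); /* == 0xf0000000 */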
 
#define VCE_STATUS 0x20004
#define VCE_VCPU_CNTL 0x20014
#define VCE_CLK_EN (1 << 0)
#define VCE_VCPU_CACHE_OFFSET0 0x20024
#define VCE_VCPU_CACHE_SIZE0 0x20028
#define VCE_VCPU_CACHE_OFFSET1 0x2002c
#define VCE_VCPU_CACHE_SIZE1 0x20030
#define VCE_VCPU_CACHE_OFFSET2 0x20034
#define VCE_VCPU_CACHE_SIZE2 0x20038
#define VCE_SOFT_RESET 0x20120
#define VCE_ECPU_SOFT_RESET (1 << 0)
#define VCE_FME_SOFT_RESET (1 << 2)
#define VCE_RB_BASE_LO2 0x2016c
#define VCE_RB_BASE_HI2 0x20170
#define VCE_RB_SIZE2 0x20174
#define VCE_RB_RPTR2 0x20178
#define VCE_RB_WPTR2 0x2017c
#define VCE_RB_BASE_LO 0x20180
#define VCE_RB_BASE_HI 0x20184
#define VCE_RB_SIZE 0x20188
#define VCE_RB_RPTR 0x2018c
#define VCE_RB_WPTR 0x20190
#define VCE_CLOCK_GATING_A 0x202f8
#define VCE_CLOCK_GATING_B 0x202fc
#define VCE_UENC_CLOCK_GATING 0x205bc
#define VCE_UENC_REG_CLOCK_GATING 0x205c0
#define VCE_FW_REG_STATUS 0x20e10
# define VCE_FW_REG_STATUS_BUSY (1 << 0)
# define VCE_FW_REG_STATUS_PASS (1 << 3)
# define VCE_FW_REG_STATUS_DONE (1 << 11)
#define VCE_LMI_FW_START_KEYSEL 0x20e18
#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20
#define VCE_LMI_CTRL2 0x20e74
#define VCE_LMI_CTRL 0x20e98
#define VCE_LMI_VM_CTRL 0x20ea0
#define VCE_LMI_SWAP_CNTL 0x20eb4
#define VCE_LMI_SWAP_CNTL1 0x20eb8
#define VCE_LMI_CACHE_CTRL 0x20ef4
 
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
#define VCE_CMD_IB 0x00000002
#define VCE_CMD_FENCE 0x00000003
#define VCE_CMD_TRAP 0x00000004
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
 
#endif
/drivers/video/drm/radeon/r600_reg.h
31,12 → 31,6
#define R600_PCIE_PORT_INDEX 0x0038
#define R600_PCIE_PORT_DATA 0x003c
 
#define R600_RCU_INDEX 0x0100
#define R600_RCU_DATA 0x0104
 
#define R600_UVD_CTX_INDEX 0xf4a0
#define R600_UVD_CTX_DATA 0xf4a4
 
#define R600_MC_VM_FB_LOCATION 0x2180
#define R600_MC_FB_BASE_MASK 0x0000FFFF
#define R600_MC_FB_BASE_SHIFT 0
/drivers/video/drm/radeon/radeon_agp.c
117,6 → 117,9
/* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
{ PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
PCI_VENDOR_ID_SONY, 0x8175, 1},
/* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
{ PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
PCI_VENDOR_ID_ATI, 0x0152, 2},
{ 0, 0, 0, 0, 0, 0, 0 },
};
#endif
/drivers/video/drm/radeon/radeon_i2c.c
64,7 → 64,8
radeon_router_select_ddc_port(radeon_connector);
 
if (use_aux) {
ret = i2c_transfer(&radeon_connector->ddc_bus->aux.ddc, msgs, 2);
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
} else {
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
}
94,8 → 95,6
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
 
mutex_lock(&i2c->mutex);
 
/* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before
* doing DDC - do this for all r200s/r300s/r400s for safety's sake
172,8 → 171,6
temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
 
mutex_unlock(&i2c->mutex);
}
 
static int get_clock(void *i2c_priv)
817,8 → 814,6
struct radeon_i2c_bus_rec *rec = &i2c->rec;
int ret = 0;
 
mutex_lock(&i2c->mutex);
 
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
885,8 → 880,6
break;
}
 
mutex_unlock(&i2c->mutex);
 
return ret;
}
 
926,7 → 919,6
i2c->adapter.class = I2C_CLASS_DDC;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
mutex_init(&i2c->mutex);
if (rec->mm_i2c ||
(rec->hw_capable &&
radeon_hw_i2c &&
957,16 → 949,16
/* set the radeon bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon i2c bit bus %s", name);
i2c->adapter.algo_data = &i2c->bit;
i2c->bit.pre_xfer = pre_xfer;
i2c->bit.post_xfer = post_xfer;
i2c->bit.setsda = set_data;
i2c->bit.setscl = set_clock;
i2c->bit.getsda = get_data;
i2c->bit.getscl = get_clock;
i2c->bit.udelay = 10;
i2c->bit.timeout = usecs_to_jiffies(2200); /* from VESA */
i2c->bit.data = i2c;
i2c->adapter.algo_data = &i2c->algo.bit;
i2c->algo.bit.pre_xfer = pre_xfer;
i2c->algo.bit.post_xfer = post_xfer;
i2c->algo.bit.setsda = set_data;
i2c->algo.bit.setscl = set_clock;
i2c->algo.bit.getsda = get_data;
i2c->algo.bit.getscl = get_clock;
i2c->algo.bit.udelay = 10;
i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */
i2c->algo.bit.data = i2c;
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
DRM_ERROR("Failed to register bit i2c %s\n", name);
981,13 → 973,45
 
}
 
struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
{
struct radeon_i2c_chan *i2c;
int ret;
 
i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
if (i2c == NULL)
return NULL;
 
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->dev = dev;
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon aux bus %s", name);
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.algo_data = &i2c->algo.dp;
i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
i2c->algo.dp.address = 0;
ret = i2c_dp_aux_add_bus(&i2c->adapter);
if (ret) {
DRM_INFO("Failed to register i2c %s\n", name);
goto out_free;
}
 
return i2c;
out_free:
kfree(i2c);
return NULL;
 
}
 
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
i2c_del_adapter(&i2c->adapter);
if (i2c->has_aux)
drm_dp_aux_unregister(&i2c->aux);
kfree(i2c);
}
 
994,9 → 1018,6
/* Add the default buses */
void radeon_i2c_init(struct radeon_device *rdev)
{
if (radeon_hw_i2c)
DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
 
if (rdev->is_atom_bios)
radeon_atombios_i2c_init(rdev);
else
/drivers/video/drm/radeon/display.h
47,7 → 47,7
 
};
 
extern display_t *os_display;
extern display_t *rdisplay;
 
int init_cursor(cursor_t *cursor);
void __stdcall restore_cursor(int x, int y);
/drivers/video/drm/radeon/radeon_sa.c
49,7 → 49,7
 
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 align, u32 domain, u32 flags)
unsigned size, u32 domain)
{
int i, r;
 
57,7 → 57,6
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
sa_manager->align = align;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
64,8 → 63,8
INIT_LIST_HEAD(&sa_manager->flist[i]);
}
 
r = radeon_bo_create(rdev, size, align, true,
domain, flags, NULL, &sa_manager->bo);
r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
domain, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
312,13 → 311,13
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
unsigned size, unsigned align)
unsigned size, unsigned align, bool block)
{
struct radeon_fence *fences[RADEON_NUM_RINGS];
unsigned tries[RADEON_NUM_RINGS];
int i, r;
 
BUG_ON(align > sa_manager->align);
BUG_ON(align > RADEON_GPU_PAGE_SIZE);
BUG_ON(size > sa_manager->size);
 
*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
353,11 → 352,14
r = radeon_fence_wait_any(rdev, fences, false);
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for, block */
if (r == -ENOENT) {
r = wait_event_interruptible(
sa_manager->wq,
radeon_sa_event(sa_manager, size, align)
);
if (r == -ENOENT && block) {
// r = wait_event_interruptible_locked(
// sa_manager->wq,
// radeon_sa_event(sa_manager, size, align)
// );
 
} else if (r == -ENOENT) {
r = -ENOMEM;
}
 
} while (!r);
399,15 → 401,13
 
spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
uint64_t soffset = i->soffset + sa_manager->gpu_addr;
uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
} else {
seq_printf(m, " ");
}
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
soffset, eoffset, eoffset - soffset);
seq_printf(m, "[0x%08x 0x%08x] size %8d",
i->soffset, i->eoffset, i->eoffset - i->soffset);
if (i->fence) {
seq_printf(m, " protected by 0x%016llx on ring %d",
i->fence->seq, i->fence->ring);
/drivers/video/drm/radeon/radeon_benchmark.c
41,7 → 41,7
struct radeon_fence *fence = NULL;
int i, r;
 
start_jiffies = jiffies;
start_jiffies = GetTimerTicks();
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
65,7 → 65,7
goto exit_do_move;
radeon_fence_unref(&fence);
}
end_jiffies = jiffies;
end_jiffies = GetTimerTicks();
r = jiffies_to_msecs(end_jiffies - start_jiffies);
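/* A minimal sketch of how a result in milliseconds reduces to a rate: for n
 * copies of 'size' bytes, (n * (size >> 10)) / time is kB per ms, i.e. MB/s.
 * Variable names follow the function above: */
unsigned int throughput = (n * (size >> 10)) / time; /* MB/s */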
 
exit_do_move:
100,7 → 100,7
ENTER();
 
n = RADEON_BENCHMARK_ITERATIONS;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
if (r) {
goto out_cleanup;
}
108,11 → 108,11
if (unlikely(r != 0))
goto out_cleanup;
r = radeon_bo_pin(sobj, sdomain, &saddr);
radeon_bo_unreserve(sobj);
// radeon_bo_unreserve(sobj);
if (r) {
goto out_cleanup;
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
if (r) {
goto out_cleanup;
}
120,13 → 120,16
if (unlikely(r != 0))
goto out_cleanup;
r = radeon_bo_pin(dobj, ddomain, &daddr);
radeon_bo_unreserve(dobj);
// radeon_bo_unreserve(dobj);
if (r) {
goto out_cleanup;
}
dbgprintf("done\n");
 
if (rdev->asic->copy.dma) {
/* r100 doesn't have dma engine so skip the test */
/* also, VRAM-to-VRAM test doesn't make much sense for DMA */
/* skip it as well if domains are the same */
if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_DMA, n);
if (time < 0)
136,7 → 139,6
sdomain, ddomain, "dma");
}
 
if (rdev->asic->copy.blit) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_BLIT, n);
if (time < 0)
144,7 → 146,6
if (time > 0)
radeon_benchmark_log_results(n, size, time,
sdomain, ddomain, "blit");
}
 
out_cleanup:
if (sobj) {
/drivers/video/drm/radeon/ObjectID.h
69,8 → 69,6
#define ENCODER_OBJECT_ID_ALMOND 0x22
#define ENCODER_OBJECT_ID_TRAVIS 0x23
#define ENCODER_OBJECT_ID_NUTMEG 0x22
#define ENCODER_OBJECT_ID_HDMI_ANX9805 0x26
 
/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
88,8 → 86,6
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
 
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
 
368,14 → 364,6
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
 
#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
 
#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
 
#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
404,10 → 392,6
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
 
#define ENCODER_HDMI_ANX9805_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
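/* A minimal sketch of decomposing a composite ID built as above; the
 * *_MASK names are assumed from elsewhere in this header: */
u8 object_id = (id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
u8 enum_id = (id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
u8 object_type = (id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;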
 
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
477,14 → 461,6
GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
 
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
 
#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
 
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
497,10 → 473,6
GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
 
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
 
#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
569,18 → 541,6
GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
 
#define CONNECTOR_HDMI_TYPE_A_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
 
#define CONNECTOR_HDMI_TYPE_A_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
 
#define CONNECTOR_HDMI_TYPE_A_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
 
#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
/drivers/video/drm/radeon/atombios_i2c.c
30,7 → 30,7
#define TARGET_HW_I2C_CLOCK 50
 
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
#define ATOM_MAX_HW_I2C_WRITE 3
#define ATOM_MAX_HW_I2C_WRITE 2
#define ATOM_MAX_HW_I2C_READ 255
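/* A minimal sketch of working within the write limit: a larger message has
 * to be split into ATOM_MAX_HW_I2C_WRITE-sized transactions, which is what
 * radeon_atom_hw_i2c_xfer() below does. Variable names are hypothetical: */
while (remaining) {
int chunk = min(remaining, ATOM_MAX_HW_I2C_WRITE);
ret = radeon_process_i2c_ch(i2c, addr, HW_I2C_WRITE, &buf[offset], chunk);
if (ret)
return ret;
offset += chunk;
remaining -= chunk;
}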
 
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
42,42 → 42,28
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
u16 out = cpu_to_le16(0);
int r = 0;
u16 out;
 
memset(&args, 0, sizeof(args));
 
mutex_lock(&chan->mutex);
 
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
r = -EINVAL;
goto done;
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
return -EINVAL;
}
if (buf == NULL)
args.ucRegIndex = 0;
else
args.ucRegIndex = buf[0];
if (num)
num--;
if (num)
memcpy(&out, &buf[1], num);
memcpy(&out, buf, num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
r = -EINVAL;
goto done;
return -EINVAL;
}
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
}
 
args.ucFlag = flags;
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
args.ucRegIndex = 0;
args.ucTransBytes = num;
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
87,17 → 73,13
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
DRM_DEBUG_KMS("hw_i2c error\n");
r = -EIO;
goto done;
return -EIO;
}
 
if (!(flags & HW_I2C_WRITE))
radeon_atom_copy_swap(buf, base, num, false);
memcpy(buf, base, num);
 
done:
mutex_unlock(&chan->mutex);
 
return r;
return 0;
}
 
int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
106,7 → 88,7
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct i2c_msg *p;
int i, remaining, current_count, buffer_offset, max_bytes, ret;
u8 flags;
u8 buf = 0, flags;
 
/* check for bus probe */
p = &msgs[0];
113,7 → 95,7
if ((num == 1) && (p->len == 0)) {
ret = radeon_process_i2c_ch(i2c,
p->addr, HW_I2C_WRITE,
NULL, 0);
&buf, 1);
if (ret)
return ret;
else
/drivers/video/drm/radeon/cayman_blit_shaders.c
317,4 → 317,58
0x00000010, /* */
};
 
const u32 cayman_vs[] =
{
0x00000004,
0x80400400,
0x0000a03c,
0x95000688,
0x00004000,
0x15000688,
0x00000000,
0x88000000,
0x04000000,
0x67961001,
#ifdef __BIG_ENDIAN
0x00020000,
#else
0x00000000,
#endif
0x00000000,
0x04000000,
0x67961000,
#ifdef __BIG_ENDIAN
0x00020008,
#else
0x00000008,
#endif
0x00000000,
};
 
const u32 cayman_ps[] =
{
0x00000004,
0xa00c0000,
0x00000008,
0x80400000,
0x00000000,
0x95000688,
0x00000000,
0x88000000,
0x00380400,
0x00146b10,
0x00380000,
0x20146b10,
0x00380400,
0x40146b00,
0x80380000,
0x60146b00,
0x00000010,
0x000d1000,
0xb0800000,
0x00000000,
};
 
const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
/drivers/video/drm/radeon/evergreen_blit_shaders.c
300,4 → 300,58
0x00000010, /* */
};
 
const u32 evergreen_vs[] =
{
0x00000004,
0x80800400,
0x0000a03c,
0x95000688,
0x00004000,
0x15200688,
0x00000000,
0x00000000,
0x3c000000,
0x67961001,
#ifdef __BIG_ENDIAN
0x000a0000,
#else
0x00080000,
#endif
0x00000000,
0x1c000000,
0x67961000,
#ifdef __BIG_ENDIAN
0x00020008,
#else
0x00000008,
#endif
0x00000000,
};
 
const u32 evergreen_ps[] =
{
0x00000003,
0xa00c0000,
0x00000008,
0x80400000,
0x00000000,
0x95200688,
0x00380400,
0x00146b10,
0x00380000,
0x20146b10,
0x00380400,
0x40146b00,
0x80380000,
0x60146b00,
0x00000000,
0x00000000,
0x00000010,
0x000d1000,
0xb0800000,
0x00000000,
};
 
const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
/drivers/video/drm/radeon/r600_blit_shaders.h
35,4 → 35,5
extern const u32 r6xx_ps_size, r6xx_vs_size;
extern const u32 r6xx_default_size, r7xx_default_size;
 
__pure uint32_t int2float(uint32_t x);
#endif
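 
The int2float() helper declared above converts a 32-bit integer into its IEEE-754 single-precision bit pattern, which the blit shaders consume as vertex data. For reference, a sketch following the upstream implementation in r600_blit_shaders.c (shown here for context, not part of this revision):
 
#include <linux/bitops.h>	/* __fls(), ror32() */
 
uint32_t int2float(uint32_t x)
{
	uint32_t msb, exponent, fraction;
 
	/* Zero is special: all-zero bits already encode 0.0f */
	if (!x)
		return 0;
 
	/* Position of the most significant set bit */
	msb = __fls(x);
 
	/* A rotate works both leftwards and rightwards thanks to its
	 * mod-32 behaviour, so no separate check for values >= 2^24 */
	fraction = ror32(x, (msb - 24) & 0x1f) & 0x7fffff;
	exponent = (127 + msb) << 23;
 
	return fraction + exponent;
}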
/drivers/video/drm/radeon/radeon_encoders.c
343,7 → 343,7
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
365,7 → 365,7
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
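 
Both hunks above implement the same rule: a single TMDS link tops out at 165 MHz, except that on DCE6 parts driving an HDMI sink, HDMI 1.3 raises the single-link ceiling to 340 MHz. Condensed into one hypothetical helper (name and shape are illustrative; the driver open-codes this per connector type, with clocks in kHz):
 
static bool needs_dual_link(bool is_dce6, bool hdmi_sink, u32 pixel_clock)
{
	/* HDMI 1.3 single link: 340 MHz; plain TMDS link: 165 MHz */
	u32 single_link_limit = (is_dce6 && hdmi_sink) ? 340000 : 165000;
 
	return pixel_clock > single_link_limit;
}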
/drivers/video/drm/radeon/radeon_fb.c
35,9 → 35,11
 
#include <drm/drm_fb_helper.h>
 
struct drm_framebuffer *main_fb;
struct drm_gem_object *main_fb_obj;
 
int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p);
 
/* object hierarchy -
this contains a helper plus a radeon fb;
the helper contains a pointer to the radeon framebuffer base class.
86,105 → 88,10
return aligned;
}
 
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
int ret;
 
ret = radeon_bo_reserve(rbo, false);
if (likely(ret == 0)) {
radeon_bo_kunmap(rbo);
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
drm_gem_object_unreference_unlocked(gobj);
}
 
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = rfbdev->rdev;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
int height = mode_cmd->height;
u32 bpp, depth;
 
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
fb_tiled) * ((bpp + 1) / 8);
 
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
 
rbo = rdev->stollen_vga_memory;
gobj = &rbo->gem_base;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&rbo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
 
rbo = gem_to_radeon_bo(gobj);
 
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
 
#ifdef __BIG_ENDIAN
switch (bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
case 16:
tiling_flags |= RADEON_TILING_SWAP_16BIT;
default:
break;
}
#endif
 
// if (tiling_flags) {
// ret = radeon_bo_set_tiling_flags(rbo,
// tiling_flags | RADEON_TILING_SURFACE,
// mode_cmd->pitches[0]);
// if (ret)
// dev_err(rdev->dev, "FB failed to set tiling flags\n");
// }
 
 
ret = radeon_bo_reserve(rbo, false);
if (unlikely(ret != 0))
goto out_unref;
/* Only 27 bit offset for legacy CRTC */
ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
NULL);
if (ret) {
radeon_bo_unreserve(rbo);
goto out_unref;
}
radeon_bo_unreserve(rbo);
if (ret) {
goto out_unref;
}
 
*gobj_p = gobj;
return 0;
out_unref:
radeonfb_destroy_pinned_object(gobj);
*gobj_p = NULL;
return ret;
}
 
static int radeonfb_create(struct drm_fb_helper *helper,
static int radeonfb_create(struct radeon_fbdev *rfbdev,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
195,6 → 102,8
int ret;
unsigned long tmp;
 
ENTER();
 
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
 
216,6 → 125,7
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
if (info == NULL) {
dbgprintf("framebuffer_alloc\n");
ret = -ENOMEM;
goto out_unref;
}
224,7 → 134,7
 
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
DRM_ERROR("failed to initalise framebuffer %d\n", ret);
goto out_unref;
}
 
262,7 → 172,10
 
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
 
// if (info->screen_base == NULL) {
// ret = -ENOSPC;
// goto out_unref;
// }
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
269,9 → 182,9
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
 
main_fb = fb;
main_fb_obj = gobj;
 
LEAVE();
 
return 0;
 
out_unref:
284,7 → 197,21
return ret;
}
 
static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
int new_fb = 0;
int ret;
 
if (!helper->fb) {
ret = radeonfb_create(rfbdev, sizes);
if (ret)
return ret;
new_fb = 1;
}
return new_fb;
}
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
struct fb_info *info;
306,17 → 233,20
return 0;
}
 
static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
.fb_probe = radeonfb_create,
.fb_probe = radeon_fb_find_or_create_single,
};
 
extern struct radeon_fbdev *kos_rfbdev;
 
int radeon_fbdev_init(struct radeon_device *rdev)
{
struct radeon_fbdev *rfbdev;
int bpp_sel = 32;
int ret;
 
ENTER();
 
/* select 8 bpp console on RN50 or 16MB cards */
329,10 → 259,8
 
rfbdev->rdev = rdev;
rdev->mode_info.rfbdev = rfbdev;
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
 
drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
&radeon_fb_helper_funcs);
 
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
rdev->num_crtc,
RADEONFB_CONN_LIMIT);
342,11 → 270,10
}
 
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
 
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(rdev->ddev);
kos_rfbdev = rfbdev;
 
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
LEAVE();
 
return 0;
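 
On the pitch computation in radeonfb_create_pinned_object() above: radeon_align_pitch() rounds the width up to the CRTC's pitch granularity in pixels, and ((bpp + 1) / 8) converts bits per pixel to bytes while rounding up. A worked example with illustrative numbers (a 64-pixel alignment is assumed here; the actual granularity is per-ASIC):
 
unsigned width = 1366, bpp = 32, align = 64;
unsigned pitch_pixels = (width + align - 1) & ~(align - 1);	/* 1408 */
unsigned pitch_bytes  = pitch_pixels * ((bpp + 1) / 8);		/* 5632 */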
/drivers/video/drm/radeon/radeon_gem.c
29,6 → 29,13
#include <drm/radeon_drm.h>
#include "radeon.h"
 
int radeon_gem_object_init(struct drm_gem_object *obj)
{
BUG();
 
return 0;
}
 
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
38,9 → 45,9
}
}
 
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int radeon_gem_object_create(struct radeon_device *rdev, int size,
int alignment, int initial_domain,
u32 flags, bool kernel,
bool discardable, bool kernel,
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
53,19 → 60,16
alignment = PAGE_SIZE;
}
 
/* Maximum bo size is the unpinned gtt size since we use the gtt to
* handle vram to system pool migrations.
*/
max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
/* maximum bo size is the minimum of visible vram and gtt size */
max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
if (size > max_size) {
DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
size >> 20, max_size >> 20);
printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
__func__, __LINE__, size >> 20, max_size >> 20);
return -ENOMEM;
}
 
retry:
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
flags, NULL, &robj);
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
72,7 → 76,7
initial_domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
86,7 → 90,33
return 0;
}
 
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
 
r = radeon_bo_reserve(robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(robj, pin_domain, gpu_addr);
radeon_bo_unreserve(robj);
return r;
}
 
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
 
r = radeon_bo_reserve(robj, false);
if (likely(r == 0)) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
}
 
int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
struct radeon_bo *robj;
137,15 → 167,18
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
unsigned i;
 
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
args->vram_size = rdev->mc.real_vram_size;
args->vram_visible = (u64)man->size << PAGE_SHIFT;
args->vram_visible -= rdev->vram_pin_size;
args->gart_size = rdev->mc.gtt_size;
args->gart_size -= rdev->gart_pin_size;
 
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
for(i = 0; i < RADEON_NUM_RINGS; ++i)
args->gart_size -= rdev->ring[i].ring_size;
return 0;
}
 
178,7 → 211,7
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
args->initial_domain, args->flags,
args->initial_domain, false,
false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
270,7 → 303,18
}
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, &cur_placement, true);
args->domain = radeon_mem_type_to_domain(cur_placement);
switch (cur_placement) {
case TTM_PL_VRAM:
args->domain = RADEON_GEM_DOMAIN_VRAM;
break;
case TTM_PL_TT:
args->domain = RADEON_GEM_DOMAIN_GTT;
break;
case TTM_PL_SYSTEM:
args->domain = RADEON_GEM_DOMAIN_CPU;
default:
break;
}
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
284,7 → 328,6
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
uint32_t cur_placement = 0;
 
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
291,11 → 334,10
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, &cur_placement, false);
/* Flush HDP cache via MMIO if necessary */
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
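 
The radeon_mem_type_to_domain() call that replaces the open-coded switch in the busy ioctl above maps TTM placements onto GEM domains; it matches the inline helper from radeon_object.h:
 
static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return RADEON_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return RADEON_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	return 0;
}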
/drivers/video/drm/radeon/cmdline.c
2,7 → 2,7
#include <drm/drmP.h>
#include <drm.h>
#include <drm_mm.h>
#include <drm/radeon_drm.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_object.h"
 
/drivers/video/drm/radeon/radeon_trace.h
3,19 → 3,80
 
#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
 
#include <drm/drmP.h>
 
#undef TRACE_SYSTEM
#define TRACE_SYSTEM radeon
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE radeon_trace
 
#define trace_radeon_vm_set_page(pe, addr, count, incr, flags)
#define trace_radeon_fence_emit(ddev, ring, seq)
#define trace_radeon_fence_wait_begin(ddev, i, target_seq)
#define trace_radeon_fence_wait_end(ddev, i, target_seq)
#define trace_radeon_semaphore_signale(ridx, semaphore)
#define trace_radeon_semaphore_wait(ridx, semaphore)
#define trace_radeon_vm_grab_id(id, ring)
#define trace_radeon_vm_bo_update(bo_va)
#define trace_radeon_bo_create(bo)
#define trace_radeon_cs(parser)
#define trace_radeon_vm_flush(pd_addr, ring, id)
TRACE_EVENT(radeon_bo_create,
TP_PROTO(struct radeon_bo *bo),
TP_ARGS(bo),
TP_STRUCT__entry(
__field(struct radeon_bo *, bo)
__field(u32, pages)
),
 
TP_fast_assign(
__entry->bo = bo;
__entry->pages = bo->tbo.num_pages;
),
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
 
DECLARE_EVENT_CLASS(radeon_fence_request,
 
TP_PROTO(struct drm_device *dev, u32 seqno),
 
TP_ARGS(dev, seqno),
 
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, seqno)
),
 
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->seqno = seqno;
),
 
TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
 
DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
 
TP_PROTO(struct drm_device *dev, u32 seqno),
 
TP_ARGS(dev, seqno)
);
 
DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
 
TP_PROTO(struct drm_device *dev, u32 seqno),
 
TP_ARGS(dev, seqno)
);
 
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
 
TP_PROTO(struct drm_device *dev, u32 seqno),
 
TP_ARGS(dev, seqno)
);
 
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
 
TP_PROTO(struct drm_device *dev, u32 seqno),
 
TP_ARGS(dev, seqno)
);
 
#endif
 
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>
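 
Replacing the tracepoints with empty macros, as the new revision does, lets every trace_radeon_*() call site compile away on a platform without the Linux tracing core. For example, a call along the lines of
 
	trace_radeon_fence_emit(rdev->ddev, ring, seq);
 
expands to an empty statement, so no tracing code or data is emitted.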
/drivers/video/drm/radeon/r420_reg_safe.h
31,12 → 31,12
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFC48, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE00BFF,
0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
0x00000000, 0x00000100, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0xFF800000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x0003FC0B, 0xFFFFFCFF, 0xFFBFFB99,
0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
};
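 
These generated reg_safe bitmaps drive the command-stream checker: each 32-bit word covers 32 dword registers (128 bytes of register space), and a set bit means the register is not blanket-safe and must go through the per-ASIC check callback before a userspace command stream may write it. Condensed from the lookup in the upstream r100_cs_parse_packet0() (reg is a byte offset; auth points at the bitmap):
 
unsigned j = reg >> 7;			/* bitmap word     */
unsigned m = 1 << ((reg >> 2) & 31);	/* bit within word */
 
if (auth[j] & m) {
	/* register needs validation before the write is allowed */
	r = check(p, pkt, idx, reg);
	if (r)
		return r;
}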
/drivers/video/drm/radeon/r200_reg_safe.h
16,7 → 16,7
0xFFE7FE1F, 0xF003FFFF, 0x7EFFFFFF, 0xFFFF803C,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFEFCE, 0xFFFEFFFF, 0xFFFFFFFE,
0x020E0FF0, 0xFFCC83FD, 0xFFFFFFFF, 0xFFFFFFFF,
0x020E0FF0, 0xFFFC83FD, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFBFFFF, 0xEFFCFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
/drivers/video/drm/radeon/r300_reg_safe.h
31,12 → 31,12
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFC48, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE00BFF,
0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
0x00000000, 0x0000C100, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x0003FC0B, 0xFFFFFCFF, 0xFFBFFB99,
0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
};
/drivers/video/drm/radeon/rs600_reg_safe.h
31,14 → 31,14
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFC48, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE00BFF,
0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
0x00000000, 0x00000100, 0x00000000, 0x00000000,
0x00000000, 0x0000C100, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0xFF800000,
0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x0003FC0B, 0xFFFFFCFF, 0xFFBFFB99, 0xFFFFFFFF,
0x0003FC01, 0xFFFFFCF8, 0xFF800B19, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
/drivers/video/drm/radeon/rv515_reg_safe.h
15,7 → 15,7
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFEFC6, 0xF00EBFFF, 0x007C0000,
0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
31,14 → 31,14
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0x1FFFFC48, 0xFFFFE000, 0xFFFFFE1E, 0xFFFFFFFF,
0x388F8F50, 0xFFF88082, 0xFF0000FC, 0xFAE00BFF,
0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF,
0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF,
0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
0x00008CFC, 0xFFFCC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE80FFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x0003FC0B, 0x3FFFFCFF, 0xFFBFFB99, 0xFFDFFFFF,
0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,