Subversion Repositories: Kolibri OS

Compare Revisions

Rev 2997 → Rev 2996

/drivers/video/drm/radeon/radeon_blit_common.h
File deleted
/drivers/video/drm/radeon/si.c
File deleted
/drivers/video/drm/radeon/si_blit_shaders.c
File deleted
/drivers/video/drm/radeon/si_blit_shaders.h
File deleted
/drivers/video/drm/radeon/si_reg.h
File deleted
/drivers/video/drm/radeon/atombios_i2c.c
File deleted
/drivers/video/drm/radeon/atombios_encoders.c
File deleted
/drivers/video/drm/radeon/sid.h
File deleted
/drivers/video/drm/radeon/reg_srcs/r600
File deleted
/drivers/video/drm/radeon/reg_srcs/r420
File deleted
/drivers/video/drm/radeon/reg_srcs/cayman
File deleted
/drivers/video/drm/radeon/reg_srcs/rs600
File deleted
/drivers/video/drm/radeon/reg_srcs/rv515
File deleted
/drivers/video/drm/radeon/reg_srcs/r100
File deleted
/drivers/video/drm/radeon/reg_srcs/evergreen
File deleted
/drivers/video/drm/radeon/reg_srcs/r200
File deleted
/drivers/video/drm/radeon/reg_srcs/r300
File deleted
/drivers/video/drm/radeon/reg_srcs/rn50
File deleted
/drivers/video/drm/radeon/radeon_semaphore.c
File deleted
/drivers/video/drm/radeon/evergreen_hdmi.c
File deleted
/drivers/video/drm/radeon/radeon_sa.c
File deleted
/drivers/video/drm/radeon/r600_video.c
709,7 → 709,7
 
mutex_lock(&rdev->r600_video.mutex);
rdev->r600_video.vb_ib = NULL;
r = r600_video_prepare_copy(rdev, w*4);
r = r600_video_prepare_copy(rdev, h*pitch);
if (r) {
// if (rdev->r600_blit.vb_ib)
// radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
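The only substantive change in this hunk is the size handed to r600_video_prepare_copy(). A minimal sketch of the difference, assuming a 32-bpp surface whose pitch is the per-row byte stride (both are assumptions; the surrounding code is not part of this diff):

/* bytes covered by each variant of the size argument */
unsigned one_row    = w * 4;      /* a single row of 32-bit pixels (rev 2997) */
unsigned whole_rect = h * pitch;  /* every row, row padding included (rev 2996) */

So rev 2996 sizes the prepared copy for the whole rectangle, while rev 2997 sizes it for a single row.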
/drivers/video/drm/radeon/Makefile
42,24 → 42,23
 
NAME_SRC= \
pci.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_dp_i2c_helper.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_stub.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
tracker/bitmap.c \
r700_vs.c \
r600_video.c \
radeon_device.c \
evergreen.c \
evergreen_blit_shaders.c \
evergreen_blit_kms.c \
evergreen_hdmi.c \
cayman_blit_shaders.c \
radeon_clocks.c \
atom.c \
73,8 → 72,6
radeon_connectors.c \
atombios_crtc.c \
atombios_dp.c \
atombios_encoders.c \
atombios_i2c.c \
radeon_encoders.c \
radeon_fence.c \
radeon_gem.c \
87,8 → 84,6
radeon_gart.c \
radeon_ring.c \
radeon_object_kos.c \
radeon_sa.c \
radeon_semaphore.c \
radeon_pm.c \
r100.c \
r200.c \
97,6 → 92,7
rv515.c \
r520.c \
r600.c \
r600_audio.c \
r600_blit_kms.c \
r600_blit_shaders.c \
r600_hdmi.c \
108,8 → 104,6
rdisplay.c \
rdisplay_kms.c \
cmdline.c \
si.c \
si_blit_shaders.c \
fwblob.asm
 
FW_BINS= \
/drivers/video/drm/radeon/evergreen.c
24,10 → 24,10
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
37,122 → 37,16
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
 
static const u32 crtc_offsets[6] =
{
EVERGREEN_CRTC0_REGISTER_OFFSET,
EVERGREEN_CRTC1_REGISTER_OFFSET,
EVERGREEN_CRTC2_REGISTER_OFFSET,
EVERGREEN_CRTC3_REGISTER_OFFSET,
EVERGREEN_CRTC4_REGISTER_OFFSET,
EVERGREEN_CRTC5_REGISTER_OFFSET
};
 
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split)
{
*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
switch (*bankw) {
default:
case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
}
switch (*bankh) {
default:
case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
}
switch (*mtaspect) {
default:
case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
}
}
 
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
u16 ctl, v;
int err;
 
err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
if (err)
return;
 
v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
 
/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
* to avoid hangs or performance issues
*/
if ((v == 0) || (v == 6) || (v == 7)) {
ctl &= ~PCI_EXP_DEVCTL_READRQ;
ctl |= (2 << 12);
pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
}
}
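For reference, the DEVCTL field this helper sanitizes uses the standard PCIe encoding (general PCIe knowledge, not something this diff states):

/* MAX_READ_REQUEST_SIZE sits in PCI_EXP_DEVCTL bits 14:12 */
u16 v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
/* request size in bytes = 128 << v; v = 0..5 encodes 128..4096 while
 * 6 and 7 are reserved, so the function rewrites those (and 0, which
 * it also treats as bogus) to v = 2, i.e. 512 bytes */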
 
/**
* dce4_wait_for_vblank - vblank wait asic callback.
*
* @rdev: radeon_device pointer
* @crtc: crtc to wait for vblank on
*
* Wait for vblank on the requested crtc (evergreen+).
*/
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
int i;
 
if (crtc >= rdev->num_crtc)
return;
 
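/* Two-phase wait: if the CRTC is already inside a vblank period, the
 * first loop polls until that period ends and the second polls until
 * the next one begins, so the caller always resumes at the start of a
 * fresh vblank rather than partway through one. */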
if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
break;
udelay(1);
}
}
}
 
 
/**
* evergreen_page_flip - pageflip callback.
*
* @rdev: radeon_device pointer
* @crtc_id: crtc to cleanup pageflip on
* @crtc_base: new address of the crtc (GPU MC address)
*
* Does the actual pageflip (evergreen+).
* During vblank we take the crtc lock and wait for the update_pending
* bit to go high, when it does, we release the lock, and allow the
* double buffered update to take place.
* Returns the current update pending status.
*/
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
int i;
 
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
170,11 → 64,7
(u32)crtc_base);
 
/* Wait for update_pending to go high. */
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
break;
udelay(1);
}
while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
/* Unlock the lock, so double-buffering can take place inside vblank */
232,74 → 122,6
return actual_temp * 1000;
}
 
/**
* sumo_pm_init_profile - Initialize power profiles callback.
*
* @rdev: radeon_device pointer
*
* Initialize the power states used in profile mode
* (sumo, trinity, SI).
* Used for profile mode only.
*/
void sumo_pm_init_profile(struct radeon_device *rdev)
{
int idx;
 
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
 
/* low,mid sh/mh */
if (rdev->flags & RADEON_IS_MOBILITY)
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
else
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
 
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
 
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
 
/* high sh/mh */
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
rdev->pm.power_state[idx].num_clock_modes - 1;
 
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
rdev->pm.power_state[idx].num_clock_modes - 1;
}
 
/**
* evergreen_pm_misc - set additional pm hw parameters callback.
*
* @rdev: radeon_device pointer
*
* Set non-clock parameters associated with a power state
* (voltage, etc.) (evergreen+).
*/
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
327,13 → 149,6
}
}
 
/**
* evergreen_pm_prepare - pre-power state change callback.
*
* @rdev: radeon_device pointer
*
* Prepare for a power state change (evergreen+).
*/
void evergreen_pm_prepare(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
352,13 → 167,6
}
}
 
/**
* evergreen_pm_finish - post-power state change callback.
*
* @rdev: radeon_device pointer
*
* Clean up after a power state change (evergreen+).
*/
void evergreen_pm_finish(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
377,15 → 185,6
}
}
 
/**
* evergreen_hpd_sense - hpd sense callback.
*
* @rdev: radeon_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Checks if a digital monitor is connected (evergreen+).
* Returns true if connected, false if not connected.
*/
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
422,14 → 221,6
return connected;
}
 
/**
* evergreen_hpd_set_polarity - hpd set polarity callback.
*
* @rdev: radeon_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Set the polarity of the hpd pin (evergreen+).
*/
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
490,19 → 281,10
}
}
 
/**
* evergreen_hpd_init - hpd setup callback.
*
* @rdev: radeon_device pointer
*
* Setup the hpd pins used by the card (evergreen+).
* Enable the pin, set the polarity, and enable the hpd interrupts.
*/
void evergreen_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned enabled = 0;
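/* 0x9c4 = 2500 and 0xfa = 250 decimal; presumably the connection-detect
 * and RX-interrupt timer periods for the HPD pins (the units are
 * hardware-defined and not stated here) */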
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
 
511,44 → 293,40
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
rdev->irq.hpd[3] = true;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
rdev->irq.hpd[5] = true;
break;
default:
break;
}
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
enabled |= 1 << radeon_connector->hpd.hpd;
}
// radeon_irq_kms_enable_hpd(rdev, enabled);
if (rdev->irq.installed)
evergreen_irq_set(rdev);
}
 
/**
* evergreen_hpd_fini - hpd tear down callback.
*
* @rdev: radeon_device pointer
*
* Tear down the hpd pins used by the card (evergreen+).
* Disable the hpd interrupts.
*/
void evergreen_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned disabled = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
555,28 → 333,32
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
rdev->irq.hpd[3] = false;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
rdev->irq.hpd[5] = false;
break;
default:
break;
}
disabled |= 1 << radeon_connector->hpd.hpd;
}
// radeon_irq_kms_disable_hpd(rdev, disabled);
}
 
/* watermark setup */
655,7 → 437,7
return 0;
}
 
u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
u32 tmp = RREG32(MC_SHARED_CHMAP);
 
1007,14 → 789,6
 
}
 
/**
* evergreen_bandwidth_update - update display watermarks callback.
*
* @rdev: radeon_device pointer
*
* Update the display watermarks based on the requested mode(s)
* (evergreen+).
*/
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
1038,15 → 812,6
}
}
 
/**
* evergreen_mc_wait_for_idle - wait for MC idle callback.
*
* @rdev: radeon_device pointer
*
* Wait for the MC (memory controller) to be idle.
* (evergreen+).
* Returns 0 if the MC is idle, -1 if not.
*/
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
1088,12 → 853,12
}
}
 
static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r;
 
if (rdev->gart.robj == NULL) {
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
1120,11 → 885,6
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
if ((rdev->family == CHIP_JUNIPER) ||
(rdev->family == CHIP_CYPRESS) ||
(rdev->family == CHIP_HEMLOCK) ||
(rdev->family == CHIP_BARTS))
WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
1140,16 → 900,14
WREG32(VM_CONTEXT1_CNTL, 0);
 
evergreen_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int r;
 
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
1169,10 → 927,17
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
radeon_gart_table_vram_unpin(rdev);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
}
 
static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
evergreen_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
1180,7 → 945,7
}
 
 
static void evergreen_agp_enable(struct radeon_device *rdev)
void evergreen_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
 
1208,105 → 973,175
 
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
 
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
if (rdev->num_crtc >= 4) {
save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
}
if (rdev->num_crtc >= 6) {
save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
 
/* disable VGA render */
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
/* blank the display controllers */
for (i = 0; i < rdev->num_crtc; i++) {
crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
if (crtc_enabled) {
save->crtc_enabled[i] = true;
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
for (j = 0; j < rdev->usec_timeout; j++) {
if (radeon_get_vblank_counter(rdev, i) != frame_count)
break;
udelay(1);
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
radeon_mc_wait_for_idle(rdev);
 
blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
if ((blackout & BLACKOUT_MODE_MASK) != 1) {
/* Block CPU access */
WREG32(BIF_FB_EN, 0);
/* blackout the MC */
blackout &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_D3VGA_CONTROL, 0);
WREG32(EVERGREEN_D4VGA_CONTROL, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_D5VGA_CONTROL, 0);
WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
}
 
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 tmp, frame_count;
int i, j;
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
 
/* update crtc base addresses */
for (i = 0; i < rdev->num_crtc; i++) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
 
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
 
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
 
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
}
 
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
 
/* unblackout the MC */
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
tmp &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
/* allow CPU access */
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
/* Unlock host access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
/* Restore video state */
WREG32(D1VGA_CONTROL, save->vga_control[0]);
WREG32(D2VGA_CONTROL, save->vga_control[1]);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
}
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
for (j = 0; j < rdev->usec_timeout; j++) {
if (radeon_get_vblank_counter(rdev, i) != frame_count)
break;
udelay(1);
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
/* Unlock vga access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
 
1353,11 → 1188,8
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
/* llano/ontario only */
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2)) {
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
if (rdev->flags & RADEON_IS_IGP) {
tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1392,36 → 1224,18
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 next_rptr;
 
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
 
if (ring->rptr_save_reg) {
next_rptr = ring->wptr + 3 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
} else if (rdev->wb.enabled) {
next_rptr = ring->wptr + 5 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
radeon_ring_write(ring, next_rptr);
radeon_ring_write(ring, 0);
}
 
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(rdev, 1);
/* FIXME: implement */
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(ring, ib->length_dw);
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(rdev, ib->length_dw);
}
 
 
1459,28 → 1273,27
 
static int evergreen_cp_start(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
 
r = radeon_ring_lock(rdev, ring, 7);
r = radeon_ring_lock(rdev, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(ring, 0x1);
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
 
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
 
r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
r = radeon_ring_lock(rdev, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
1487,45 → 1300,44
}
 
/* setup clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
for (i = 0; i < evergreen_default_size; i++)
radeon_ring_write(ring, evergreen_default_state[i]);
radeon_ring_write(rdev, evergreen_default_state[i]);
 
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
/* set clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
 
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(ring, 0xc0026f00);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(rdev, 0xc0026f00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
 
/* Clear consts */
radeon_ring_write(ring, 0xc0036f00);
radeon_ring_write(ring, 0x00000bc4);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(rdev, 0xc0036f00);
radeon_ring_write(rdev, 0x00000bc4);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
 
radeon_ring_write(ring, 0xc0026900);
radeon_ring_write(ring, 0x00000316);
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
radeon_ring_write(rdev, 0xc0026900);
radeon_ring_write(rdev, 0x00000316);
radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(rdev, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_unlock_commit(rdev);
 
return 0;
}
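The raw dwords emitted above use the radeon type-3 command-packet encoding. A minimal sketch of the header layout (field positions as in the r600/evergreen headers; the opcode names are my reading of those headers, not something this diff spells out):

/* bits 31:30 = 3 (type-3), bits 29:16 = payload dwords - 1, bits 15:8 = opcode */
#define PACKET3_HDR(op, n) ((3u << 30) | (((n) & 0x3FFF) << 16) | (((op) & 0xFF) << 8))
/* 0xc0026f00 -> opcode 0x6f (SET_CTL_CONST), 3 payload dwords
 * 0xc0036f00 -> opcode 0x6f, 4 payload dwords
 * 0xc0026900 -> opcode 0x69 (SET_CONTEXT_REG), first payload 0x316 is the
 *               register offset, matching the VGT_VERTEX_REUSE_BLOCK_CNTL pair */

PACKET3_HDR is an illustrative name; the driver builds the same value with its PACKET3() macro or writes the constant directly.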
 
static int evergreen_cp_resume(struct radeon_device *rdev)
int evergreen_cp_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
1543,14 → 1355,13
RREG32(GRBM_SOFT_RESET);
 
/* Set ring buffer size */
rb_bufsz = drm_order(ring->ring_size / 8);
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
WREG32(CP_SEM_WAIT_TIMER, 0x4);
 
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
1558,8 → 1369,7
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
ring->wptr = 0;
WREG32(CP_RB_WPTR, ring->wptr);
WREG32(CP_RB_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
1577,16 → 1387,17
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
 
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
ring->rptr = RREG32(CP_RB_RPTR);
rdev->cp.rptr = RREG32(CP_RB_RPTR);
rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
evergreen_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
if (r) {
ring->ready = false;
rdev->cp.ready = false;
return r;
}
return 0;
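A worked example of the CP_RB_CNTL value assembled above, assuming the common 1 MiB ring buffer (the actual cp.ring_size is set elsewhere and is not visible in this diff):

/* RB_BUFSZ (low bits) = log2(ring bytes / 8):
 *   drm_order(1048576 / 8) = 17, so the ring holds 8 << 17 = 1 MiB
 * BLKSZ (bits 15:8) = log2(GPU page size / 8):
 *   drm_order(4096 / 8) = 9 */
u32 example = (9 << 8) | 17;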
1595,10 → 1406,205
/*
* Core functions
*/
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask = 0;
u32 enabled_backends_count = 0;
u32 cur_pipe;
u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
u32 cur_backend = 0;
u32 i;
bool force_no_swizzle;
 
if (num_tile_pipes > EVERGREEN_MAX_PIPES)
num_tile_pipes = EVERGREEN_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > EVERGREEN_MAX_BACKENDS)
num_backends = EVERGREEN_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
 
for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
 
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
 
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
 
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
force_no_swizzle = false;
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_JUNIPER:
case CHIP_BARTS:
default:
force_no_swizzle = true;
break;
}
if (force_no_swizzle) {
bool last_backend_enabled = false;
 
force_no_swizzle = false;
for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
if (((enabled_backends_mask >> i) & 1) == 1) {
if (last_backend_enabled)
force_no_swizzle = true;
last_backend_enabled = true;
} else
last_backend_enabled = false;
}
}
 
switch (num_tile_pipes) {
case 1:
case 3:
case 5:
case 7:
DRM_ERROR("odd number of pipes!\n");
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
swizzle_pipe[3] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
}
break;
}
 
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
 
backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
 
cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
}
 
return backend_map;
}
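A worked example of the nibble packing performed by the final loop, for the simplest case of two tile pipes with backends 0 and 1 enabled and no swizzling:

/* swizzle_pipe = {0, 1}, enabled_backends_mask = 0x3
 * pipe 0 -> backend 0: nibble 0 gets 0x0
 * pipe 1 -> backend 1: nibble 1 gets 0x1
 * backend_map = 0x00000010, one 4-bit backend index per pipe */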
 
static void evergreen_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
 
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
case 2:
case 3:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
}
 
switch (rdev->family) {
case CHIP_HEMLOCK:
case CHIP_CYPRESS:
case CHIP_BARTS:
tcp_chan_steer_lo = 0x54763210;
tcp_chan_steer_hi = 0x0000ba98;
break;
case CHIP_JUNIPER:
case CHIP_REDWOOD:
case CHIP_CEDAR:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
default:
tcp_chan_steer_lo = 0x76543210;
tcp_chan_steer_hi = 0x0000ba98;
break;
}
 
WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
 
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 gb_addr_config;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 gb_backend_map;
u32 grbm_gfx_index;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 sq_config;
1613,7 → 1619,6
u32 sq_stack_resource_mgmt_3;
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
u32 disabled_rb_mask;
int i, j, num_shader_engines, ps_thread_count;
 
switch (rdev->family) {
1638,7 → 1643,6
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_JUNIPER:
rdev->config.evergreen.num_ses = 1;
1660,7 → 1664,6
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_REDWOOD:
rdev->config.evergreen.num_ses = 1;
1682,7 → 1685,6
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CEDAR:
default:
1705,7 → 1707,6
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PALM:
rdev->config.evergreen.num_ses = 1;
1727,7 → 1728,6
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
1755,7 → 1755,6
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
1777,7 → 1776,6
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
1799,7 → 1797,6
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_TURKS:
rdev->config.evergreen.num_ses = 1;
1821,7 → 1818,6
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
1843,7 → 1839,6
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
break;
}
 
1858,16 → 1853,154
 
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
evergreen_fix_pci_max_read_req_size(rdev);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
 
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
& EVERGREEN_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
& EVERGREEN_MAX_SIMDS_MASK);
 
cc_rb_backend_disable =
BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
& EVERGREEN_MAX_BACKENDS_MASK);
 
 
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2))
if (rdev->flags & RADEON_IS_IGP)
mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
else
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
switch (rdev->config.evergreen.max_tile_pipes) {
case 1:
default:
gb_addr_config |= NUM_PIPES(0);
break;
case 2:
gb_addr_config |= NUM_PIPES(1);
break;
case 4:
gb_addr_config |= NUM_PIPES(2);
break;
case 8:
gb_addr_config |= NUM_PIPES(3);
break;
}
 
gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
 
if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
gb_addr_config |= ROW_SIZE(2);
else
gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
 
if (rdev->ddev->pdev->device == 0x689e) {
u32 efuse_straps_4;
u32 efuse_straps_3;
u8 efuse_box_bit_131_124;
 
WREG32(RCU_IND_INDEX, 0x204);
efuse_straps_4 = RREG32(RCU_IND_DATA);
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
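/* assembles e-fuse bits 131:124 across the 32-bit strap-register
 * boundary: bits 131:128 from the low nibble of straps_4 and bits
 * 127:124 from the top nibble of straps_3 (layout inferred from the
 * variable name) */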
 
switch(efuse_box_bit_131_124) {
case 0x00:
gb_backend_map = 0x76543210;
break;
case 0x55:
gb_backend_map = 0x77553311;
break;
case 0x56:
gb_backend_map = 0x77553300;
break;
case 0x59:
gb_backend_map = 0x77552211;
break;
case 0x66:
gb_backend_map = 0x77443300;
break;
case 0x99:
gb_backend_map = 0x66552211;
break;
case 0x5a:
gb_backend_map = 0x77552200;
break;
case 0xaa:
gb_backend_map = 0x66442200;
break;
case 0x95:
gb_backend_map = 0x66553311;
break;
default:
DRM_ERROR("bad backend map, using default\n");
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
} else if (rdev->ddev->pdev->device == 0x68b9) {
u32 efuse_straps_3;
u8 efuse_box_bit_127_124;
 
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
 
switch(efuse_box_bit_127_124) {
case 0x0:
gb_backend_map = 0x00003210;
break;
case 0x5:
case 0x6:
case 0x9:
case 0xa:
gb_backend_map = 0x00003311;
break;
default:
DRM_ERROR("bad backend map, using default\n");
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
} else {
switch (rdev->family) {
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_BARTS:
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
gb_backend_map = 0x00002200;
break;
default:
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
}
}
 
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
1894,62 → 2027,47
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
rdev->config.evergreen.tile_config |= 1 << 4;
else {
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.evergreen.tile_config |= 0 << 4;
break;
case 1: /* eight banks */
rdev->config.evergreen.tile_config |= 1 << 4;
break;
case 2: /* sixteen banks */
default:
rdev->config.evergreen.tile_config |= 2 << 4;
break;
}
}
rdev->config.evergreen.tile_config |= 0 << 8;
else
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
 
num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
rdev->config.evergreen.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
u32 efuse_straps_4;
u32 efuse_straps_3;
evergreen_program_channel_remap(rdev);
 
WREG32(RCU_IND_INDEX, 0x204);
efuse_straps_4 = RREG32(RCU_IND_DATA);
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
tmp = (((efuse_straps_4 & 0xf) << 4) |
((efuse_straps_3 & 0xf0000000) >> 28));
} else {
tmp = 0;
for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
u32 rb_disable_bitmap;
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
tmp <<= 4;
tmp |= rb_disable_bitmap;
for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
u32 rb = cc_rb_backend_disable | (0xf0 << 16);
u32 sp = cc_gc_shader_pipe_config;
u32 gfx = grbm_gfx_index | SE_INDEX(i);
 
if (i == num_shader_engines) {
rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
}
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(GRBM_GFX_INDEX, gfx);
WREG32(RLC_GFX_INDEX, gfx);
 
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(CC_RB_BACKEND_DISABLE, rb);
WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
}
 
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
WREG32(GB_BACKEND_MAP, tmp);
grbm_gfx_index |= SE_BROADCAST_WRITES;
WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
WREG32(RLC_GFX_INDEX, grbm_gfx_index);
 
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
1977,9 → 2095,6
smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
if (rdev->family <= CHIP_SUMO2)
WREG32(SMX_SAR_CTL0, 0x00010000);
 
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
2145,9 → 2260,7
 
/* Get VRAM information */
rdev->mc.vram_is_ddr = true;
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2))
if (rdev->flags & RADEON_IS_IGP)
tmp = RREG32(FUS_MC_ARB_RAMCFG);
else
tmp = RREG32(MC_ARB_RAMCFG);
2179,14 → 2292,12
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2)) {
if (rdev->flags & RADEON_IS_IGP) {
/* size in bytes on fusion */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
} else {
/* size in MB on evergreen/cayman/tn */
/* size in MB on evergreen */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
}
2197,11 → 2308,13
return 0;
}
 
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
int r;
 
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
2208,13 → 2321,20
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
radeon_ring_lockup_update(ring);
r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
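In the rev 2996 lockup path above, 0x80000000 is a type-2 packet, which the CP treats as a NOP; submitting two of them is a cheap way to force ring activity so a stuck read pointer becomes observable:

/* bits 31:30 = 2 -> type-2 (NOP/filler) packet; the other bits are ignored */
#define CP_PACKET2_NOP 0x80000000u

(the macro name is illustrative; the code writes the constant directly).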
 
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
2233,14 → 2353,6
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timed out!");
2278,14 → 2390,6
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
evergreen_mc_resume(rdev, &save);
return 0;
}
2299,22 → 2403,28
 
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
if (crtc >= rdev->num_crtc)
switch (crtc) {
case 0:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
case 1:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
case 2:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
case 3:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
case 4:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
case 5:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
default:
return 0;
else
return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
}
 
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
 
if (rdev->family >= CHIP_CAYMAN) {
cayman_cp_int_cntl_setup(rdev, 0,
CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
cayman_cp_int_cntl_setup(rdev, 1, 0);
cayman_cp_int_cntl_setup(rdev, 2, 0);
} else
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2339,8 → 2449,6
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
/* only one DAC on DCE6 */
if (!ASIC_IS_DCE6(rdev))
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
 
2362,12 → 2470,10
int evergreen_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2388,62 → 2494,38
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
}
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
} else {
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
if (rdev->irq.sw_int) {
DRM_DEBUG("evergreen_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
}
 
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
atomic_read(&rdev->irq.pflip[1])) {
rdev->irq.pflip[1]) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[2] ||
atomic_read(&rdev->irq.pflip[2])) {
rdev->irq.pflip[2]) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[3] ||
atomic_read(&rdev->irq.pflip[3])) {
rdev->irq.pflip[3]) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[4] ||
atomic_read(&rdev->irq.pflip[4])) {
rdev->irq.pflip[4]) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[5] ||
atomic_read(&rdev->irq.pflip[5])) {
rdev->irq.pflip[5]) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
2471,36 → 2553,11
DRM_DEBUG("evergreen_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
if (rdev->irq.afmt[0]) {
DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
if (rdev->irq.afmt[1]) {
DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.afmt[2]) {
DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.afmt[3]) {
DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.afmt[4]) {
DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.afmt[5]) {
DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
 
if (rdev->family >= CHIP_CAYMAN) {
cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
} else
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
2533,17 → 2590,10
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
 
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
 
return 0;
}
 
static void evergreen_irq_ack(struct radeon_device *rdev)
static inline void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
 
2564,13 → 2614,6
rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
 
rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
 
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
2644,55 → 2687,9
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
}
}
 
static void evergreen_irq_disable(struct radeon_device *rdev)
static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
evergreen_irq_ack(rdev);
evergreen_disable_interrupt_state(rdev);
}
 
void evergreen_irq_suspend(struct radeon_device *rdev)
{
evergreen_irq_disable(rdev);
r600_rlc_stop(rdev);
}
 
static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
 
if (rdev->wb.enabled)
2721,22 → 2718,22
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
 
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
 
wptr = evergreen_get_ih_wptr(rdev);
 
restart_ih:
/* is somebody else already processing irqs? */
if (atomic_xchg(&rdev->ih.lock, 1))
return IRQ_NONE;
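/* atomic_xchg() returns the previous lock value, so non-zero means
 * another context is already draining the IH ring and will pick up
 * any events we would have handled */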
 
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
spin_lock_irqsave(&rdev->ih.lock, flags);
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
 
2743,6 → 2740,7
/* display interrupts */
evergreen_irq_ack(rdev);
 
rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
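/* each IH entry is 16 bytes (four dwords): dword 0 carries the
 * source id, dword 1 the source-specific data, which is why rptr
 * advances by 16 at the bottom of this loop */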
ring_index = rptr / 4;
2955,80 → 2953,20
break;
}
break;
case 44: /* hdmi */
switch (src_data) {
case 0:
if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI0\n");
}
break;
case 1:
if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI1\n");
}
break;
case 2:
if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI2\n");
}
break;
case 3:
if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI3\n");
}
break;
case 4:
if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI4\n");
}
break;
case 5:
if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI5\n");
}
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
radeon_fence_process(rdev);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
if (rdev->family >= CHIP_CAYMAN) {
switch (src_data) {
case 0:
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
radeon_fence_process(rdev);
break;
case 1:
radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
break;
case 2:
radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
break;
}
} else
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
rdev->pm.gui_idle = true;
// wake_up(&rdev->irq.idle_queue);
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3039,24 → 2977,24
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
 
/* make sure wptr hasn't changed while processing */
wptr = evergreen_get_ih_wptr(rdev);
if (wptr != rptr)
if (wptr != rdev->ih.wptr)
goto restart_ih;
 
// if (queue_hotplug)
// schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_HANDLED;
}
 
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* enable pcie gen2 link */
if (!ASIC_IS_DCE5(rdev))
evergreen_pcie_gen2_enable(rdev);
 
if (ASIC_IS_DCE5(rdev)) {
3082,10 → 3020,6
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
3098,8 → 3032,8
 
r = evergreen_blit_init(rdev);
if (r) {
// r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
evergreen_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
3117,9 → 3051,7
}
evergreen_irq_set(rdev);
 
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
3134,8 → 3066,8
 
 
 
#if 0
 
 
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
3156,7 → 3088,6
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
#endif
 
/* The plan is to move initialization into that function and use a
* helper function so that radeon_device_init pretty much
3168,6 → 3099,10
{
int r;
 
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
3224,8 → 3159,8
if (r)
return r;
 
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
3240,26 → 3175,25
dev_err(rdev->dev, "disabling GPU acceleration\n");
rdev->accel_working = false;
}
 
/* Don't start up if the MC ucode is missing on BTC parts.
* The default clocks and voltages before the MC ucode
* is loaded are not sufficient for advanced operations.
*/
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
DRM_ERROR("radeon: MC ucode required for NI+.\n");
return -EINVAL;
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
 
}
return 0;
}
 
 
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, speed_cntl, mask;
int ret;
u32 link_width_cntl, speed_cntl;
 
if (radeon_pcie_gen2 == 0)
return;
3274,21 → 3208,7
if (ASIC_IS_X2(rdev))
return;
 
ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
if (ret != 0)
return;
 
if (!(mask & DRM_PCIE_SPEED_50))
return;
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}
 
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
 
/drivers/video/drm/radeon/r600.c
28,9 → 28,8
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
48,7 → 47,6
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536
 
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
97,7 → 95,7
 
/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
280,66 → 278,67
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned enable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
/* don't try to enable hpd on eDP or LVDS, to avoid breaking the
* aux dp channel on imacs; this helps (but does not completely fix)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
*/
continue;
}
if (ASIC_IS_DCE3(rdev)) {
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
if (ASIC_IS_DCE32(rdev))
tmp |= DC_HPDx_EN;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
rdev->irq.hpd[3] = true;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
rdev->irq.hpd[5] = true;
break;
default:
break;
}
}
} else {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[2] = true;
break;
default:
break;
}
}
enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
// radeon_irq_kms_enable_hpd(rdev, enable);
if (rdev->irq.installed)
r600_irq_set(rdev);
}
 
void r600_hpd_fini(struct radeon_device *rdev)
346,52 → 345,61
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned disable = 0;
 
if (ASIC_IS_DCE3(rdev)) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
if (ASIC_IS_DCE3(rdev)) {
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
rdev->irq.hpd[3] = false;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
rdev->irq.hpd[5] = false;
break;
default:
break;
}
}
} else {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
rdev->irq.hpd[2] = false;
break;
default:
break;
}
}
disable |= 1 << radeon_connector->hpd.hpd;
}
// radeon_irq_kms_disable_hpd(rdev, disable);
}
 
/*
405,7 → 413,7
/* flush hdp cache so updates hit vram */
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
!(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->gart.ptr;
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
u32 tmp;
 
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
440,7 → 448,7
{
int r;
 
if (rdev->gart.robj) {
if (rdev->gart.table.vram.robj) {
WARN(1, "R600 PCIE GART already initialized\n");
return 0;
}
452,12 → 460,12
return radeon_gart_table_vram_alloc(rdev);
}
 
static int r600_pcie_gart_enable(struct radeon_device *rdev)
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
 
if (rdev->gart.robj == NULL) {
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
502,17 → 510,14
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
 
r600_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
static void r600_pcie_gart_disable(struct radeon_device *rdev)
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int i;
int i, r;
 
/* Disable all tables */
for (i = 0; i < 7; i++)
539,10 → 544,17
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
radeon_gart_table_vram_unpin(rdev);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
}
 
static void r600_pcie_gart_fini(struct radeon_device *rdev)
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
r600_pcie_gart_disable(rdev);
549,7 → 561,7
radeon_gart_table_vram_free(rdev);
}
 
static void r600_agp_enable(struct radeon_device *rdev)
void r600_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
int i;
639,7 → 651,7
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
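/* MC_VM_FB_LOCATION packs the VRAM range in 16 MB units: bits 15:0
 * take vram_start >> 24 and bits 31:16 take vram_end >> 24 */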
697,7 → 709,7
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
size_af = 0xFFFFFFFF - mc->gtt_end;
size_af = 0xFFFFFFFF - mc->gtt_end + 1;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
711,7 → 723,7
mc->real_vram_size = size_af;
mc->mc_vram_size = size_af;
}
mc->vram_start = mc->gtt_end + 1;
mc->vram_start = mc->gtt_end;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
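/* with AGP the GTT aperture is fixed, so VRAM goes on whichever side
 * of it has more room (size_bf before, size_af after), shrinking
 * mc_vram_size when neither gap can hold it all */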
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
729,7 → 741,7
}
}
 
static int r600_mc_init(struct radeon_device *rdev)
int r600_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
778,41 → 790,11
return 0;
}
 
int r600_vram_scratch_init(struct radeon_device *rdev)
{
int r;
 
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
}
 
r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->vram_scratch.robj,
RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->vram_scratch.robj);
return r;
}
r = radeon_bo_kmap(rdev->vram_scratch.robj,
(void **)&rdev->vram_scratch.ptr);
if (r)
radeon_bo_unpin(rdev->vram_scratch.robj);
radeon_bo_unreserve(rdev->vram_scratch.robj);
 
return r;
}
/* We don't check whether the GPU really needs a reset; we simply do
* the reset, and it's up to the caller to determine if the GPU needs
* one. We might add a helper function to check that.
*/
static int r600_gpu_soft_reset(struct radeon_device *rdev)
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
843,14 → 825,6
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
894,35 → 868,41
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
rv515_mc_resume(rdev, &save);
return 0;
}
 
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
struct r100_gpu_lockup *lockup;
int r;
 
if (rdev->family >= CHIP_RV770)
lockup = &rdev->config.rv770.lockup;
else
lockup = &rdev->config.r600.lockup;
 
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
radeon_ring_lockup_update(ring);
r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
 
int r600_asic_reset(struct radeon_device *rdev)
{
929,51 → 909,113
return r600_gpu_soft_reset(rdev);
}
 
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
u32 total_max_rb_num,
u32 disabled_rb_mask)
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 rendering_pipe_num, rb_num_width, req_rb_num;
u32 pipe_rb_ratio, pipe_rb_remain;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;
u32 backend_map = 0;
u32 enabled_backends_mask;
u32 enabled_backends_count;
u32 cur_pipe;
u32 swizzle_pipe[R6XX_MAX_PIPES];
u32 cur_backend;
u32 i;
 
/* mask out the RBs that don't exist on that asic */
disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
if (num_tile_pipes > R6XX_MAX_PIPES)
num_tile_pipes = R6XX_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > R6XX_MAX_BACKENDS)
num_backends = R6XX_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
 
rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
BUG_ON(rendering_pipe_num < req_rb_num);
enabled_backends_mask = 0;
enabled_backends_count = 0;
for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
 
pipe_rb_ratio = rendering_pipe_num / req_rb_num;
pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
 
if (rdev->family <= CHIP_RV740) {
/* r6xx/r7xx */
rb_num_width = 2;
} else {
/* eg+ */
rb_num_width = 4;
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
 
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
swizzle_pipe[0] = 0;
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 3:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
break;
case 4:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
break;
case 5:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
break;
case 6:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
break;
case 7:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
break;
case 8:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
break;
}
 
for (i = 0; i < max_rb_num; i++) {
if (!(mask & disabled_rb_mask)) {
for (j = 0; j < pipe_rb_ratio; j++) {
data <<= rb_num_width;
data |= max_rb_num - i - 1;
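/* the loop below walks the pipes in swizzled order and hands each one
 * the next enabled backend, packing one 2-bit backend id per pipe
 * into backend_map */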
cur_backend = 0;
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
 
backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
 
cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
}
if (pipe_rb_remain) {
data <<= rb_num_width;
data |= max_rb_num - i - 1;
pipe_rb_remain--;
}
}
mask >>= 1;
}
 
return data;
return backend_map;
}
 
int r600_count_pipe_bits(uint32_t val)
987,10 → 1029,11
return ret;
}
 
static void r600_gpu_init(struct radeon_device *rdev)
void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
u32 backend_map;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
1001,9 → 1044,8
u32 sq_thread_resource_mgmt = 0;
u32 sq_stack_resource_mgmt_1 = 0;
u32 sq_stack_resource_mgmt_2 = 0;
u32 disabled_rb_mask;
 
rdev->config.r600.tiling_group_size = 256;
/* FIXME: implement */
switch (rdev->family) {
case CHIP_R600:
rdev->config.r600.max_pipes = 4;
1107,7 → 1149,10
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
 
if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
rdev->config.r600.tiling_group_size = 512;
else
rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
1119,36 → 1164,32
tiling_config |= BANK_SWAPS(1);
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
tmp = R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
if (tmp < rdev->config.r600.max_backends) {
rdev->config.r600.max_backends = tmp;
}
cc_rb_backend_disable |=
BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
tmp = R6XX_MAX_PIPES -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
if (tmp < rdev->config.r600.max_pipes) {
rdev->config.r600.max_pipes = tmp;
}
tmp = R6XX_MAX_SIMDS -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
if (tmp < rdev->config.r600.max_simds) {
rdev->config.r600.max_simds = tmp;
}
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
 
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
R6XX_MAX_BACKENDS, disabled_rb_mask);
tiling_config |= tmp << 16;
rdev->config.r600.backend_map = tmp;
 
backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
(R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
rdev->config.r600.tile_config = tiling_config;
rdev->config.r600.backend_map = backend_map;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
 
/* Setup pipes */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1392,7 → 1433,6
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
WREG32(VC_ENHANCE, 0);
}
 
 
1632,28 → 1672,27
 
int r600_cp_start(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
uint32_t cp_me;
 
r = radeon_ring_lock(rdev, ring, 7);
r = radeon_ring_lock(rdev, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(ring, 0x1);
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
if (rdev->family >= CHIP_RV770) {
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
} else {
radeon_ring_write(ring, 0x3);
radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
radeon_ring_write(rdev, 0x3);
radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
 
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
1662,7 → 1701,6
 
int r600_cp_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
1674,13 → 1712,13
WREG32(GRBM_SOFT_RESET, 0);
 
/* Set ring buffer size */
rb_bufsz = drm_order(ring->ring_size / 8);
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
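/* both CP_RB_CNTL size fields are log2 of a byte size divided by 8;
 * assuming the usual 4 KiB GPU page, the block-size field here is
 * drm_order(4096 / 8) = 9 */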
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_WAIT_TIMER, 0x4);
 
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
1688,8 → 1726,7
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
ring->wptr = 0;
WREG32(CP_RB_WPTR, ring->wptr);
WREG32(CP_RB_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
1707,47 → 1744,43
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
 
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
ring->rptr = RREG32(CP_RB_RPTR);
rdev->cp.rptr = RREG32(CP_RB_RPTR);
rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
r600_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
if (r) {
ring->ready = false;
rdev->cp.ready = false;
return r;
}
return 0;
}
 
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
void r600_cp_commit(struct radeon_device *rdev)
{
WREG32(CP_RB_WPTR, rdev->cp.wptr);
(void)RREG32(CP_RB_WPTR);
}
 
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
u32 rb_bufsz;
int r;
 
/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
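/* the ring is rounded up to 2^(rb_bufsz + 1) dwords; e.g. a requested
 * 1 MiB ring yields rb_bufsz = 17 and stays exactly 1 MiB */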
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
 
if (radeon_ring_supports_scratch_reg(rdev, ring)) {
r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
if (r) {
DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
ring->rptr_save_reg = 0;
rdev->cp.ring_size = ring_size;
rdev->cp.align_mask = 16 - 1;
}
}
}
 
void r600_cp_fini(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r600_cp_stop(rdev);
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);
radeon_ring_fini(rdev);
}
 
 
1766,7 → 1799,7
}
}
 
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
int r600_ring_test(struct radeon_device *rdev)
{
uint32_t scratch;
uint32_t tmp = 0;
1779,16 → 1812,16
return r;
}
WREG32(scratch, 0xCAFEDEAD);
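/* seed the scratch register with a dummy value, then ask the CP to
 * overwrite it with 0xDEADBEEF; seeing the new value come back proves
 * the ring is fetching and executing packets */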
r = radeon_ring_lock(rdev, ring, 3);
r = radeon_ring_lock(rdev, 3);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
radeon_scratch_free(rdev, scratch);
return r;
}
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
1796,10 → 1829,10
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
DRM_INFO("ring test succeeded in %d usecs\n", i);
} else {
DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
ring->idx, scratch, tmp);
DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
1809,82 → 1842,51
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
 
if (rdev->wb.use_event) {
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
/* EVENT_WRITE_EOP - flush caches, send int */
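/* the EOP event fires once all prior rendering has retired: the CP
 * writes fence->seq to addr (DATA_SEL(1)) and raises an interrupt
 * (INT_SEL(2)) for radeon_fence_process to pick up */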
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(rdev, addr & 0xffffffff);
radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, 0);
} else {
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* wait for 3D idle clean */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(ring, RB_INT_STAT);
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
}
}
 
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
if (rdev->family < CHIP_CAYMAN)
sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
 
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
 
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
{
struct radeon_semaphore *sem = NULL;
struct radeon_sa_bo *vb = NULL;
int r;
 
r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
if (r) {
// if (rdev->r600_blit.vb_ib)
// radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
r600_blit_done_copy(rdev, fence, vb, sem);
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
 
1901,9 → 1903,8
/* FIXME: implement */
}
 
static int r600_startup(struct radeon_device *rdev)
int r600_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* enable pcie gen2 link */
1929,10 → 1930,17
r = r600_blit_init(rdev);
if (r) {
// r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
r = r600_video_init(rdev);
if (r) {
// r600_video_fini(rdev);
// rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed video blitter (%d) falling back to memcpy\n", r);
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
1947,10 → 1955,7
}
r600_irq_set(rdev);
 
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
 
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
r = r600_cp_load_microcode(rdev);
1994,6 → 1999,10
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
2043,8 → 2052,8
if (r)
return r;
 
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
2057,9 → 2066,25
r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
// r600_suspend(rdev);
// r600_wb_fini(rdev);
// radeon_ring_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
} else {
r = r600_ib_test(rdev);
if (r) {
dev_err(rdev->dev, "IB test failed (%d).\n", r);
rdev->accel_working = false;
}
}
}
 
return 0;
}
2069,37 → 2094,20
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 next_rptr;
 
if (ring->rptr_save_reg) {
next_rptr = ring->wptr + 3 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, next_rptr);
} else if (rdev->wb.enabled) {
next_rptr = ring->wptr + 5 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
radeon_ring_write(ring, next_rptr);
radeon_ring_write(ring, 0);
}
 
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
/* FIXME: implement */
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(ring, ib->length_dw);
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(rdev, ib->length_dw);
}
 
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
int r600_ib_test(struct radeon_device *rdev)
{
struct radeon_ib ib;
struct radeon_ib *ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
2111,24 → 2119,39
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
r = radeon_ib_get(rdev, &ib);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
goto free_scratch;
return r;
}
ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
r = radeon_ib_schedule(rdev, &ib, NULL);
ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib->ptr[2] = 0xDEADBEEF;
ib->ptr[3] = PACKET2(0);
ib->ptr[4] = PACKET2(0);
ib->ptr[5] = PACKET2(0);
ib->ptr[6] = PACKET2(0);
ib->ptr[7] = PACKET2(0);
ib->ptr[8] = PACKET2(0);
ib->ptr[9] = PACKET2(0);
ib->ptr[10] = PACKET2(0);
ib->ptr[11] = PACKET2(0);
ib->ptr[12] = PACKET2(0);
ib->ptr[13] = PACKET2(0);
ib->ptr[14] = PACKET2(0);
ib->ptr[15] = PACKET2(0);
ib->length_dw = 16;
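/* the tail is padded with type-2 NOPs to a fixed 16 dwords;
 * presumably to match the CP fetch granularity (an assumption, the
 * code itself doesn't say) */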
r = radeon_ib_schedule(rdev, ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
return r;
}
r = radeon_fence_wait(ib.fence, false);
r = radeon_fence_wait(ib->fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
goto free_ib;
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
2137,16 → 2160,14
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
DRM_INFO("ib test succeeded in %u usecs\n", i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
free_ib:
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
free_scratch:
radeon_scratch_free(rdev, scratch);
return r;
}
 
2173,7 → 2194,7
rdev->ih.rptr = 0;
}
 
int r600_ih_ring_alloc(struct radeon_device *rdev)
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
int r;
 
2182,7 → 2203,7
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
NULL, &rdev->ih.ring_obj);
&rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
return r;
2209,7 → 2230,7
return 0;
}
 
void r600_ih_ring_fini(struct radeon_device *rdev)
static void r600_ih_ring_fini(struct radeon_device *rdev)
{
int r;
if (rdev->ih.ring_obj) {
2233,7 → 2254,7
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
mdelay(15);
udelay(15000);
WREG32(SRBM_SOFT_RESET, 0);
RREG32(SRBM_SOFT_RESET);
}
2256,17 → 2277,10
 
r600_rlc_stop(rdev);
 
WREG32(RLC_HB_BASE, 0);
WREG32(RLC_HB_CNTL, 0);
 
if (rdev->family == CHIP_ARUBA) {
WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
}
if (rdev->family <= CHIP_CAYMAN) {
WREG32(RLC_HB_BASE, 0);
WREG32(RLC_HB_RPTR, 0);
WREG32(RLC_HB_WPTR, 0);
}
if (rdev->family <= CHIP_CAICOS) {
WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2275,12 → 2289,7
WREG32(RLC_UCODE_CNTL, 0);
 
fw_data = (const __be32 *)rdev->rlc_fw->data;
if (rdev->family >= CHIP_ARUBA) {
for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else if (rdev->family >= CHIP_CAYMAN) {
if (rdev->family >= CHIP_CAYMAN) {
for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2333,6 → 2342,7
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
rdev->ih.enabled = false;
rdev->ih.wptr = 0;
rdev->ih.rptr = 0;
}
 
2361,15 → 2371,6
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
} else {
tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
} else {
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2380,10 → 2381,6
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
}
 
2453,9 → 2450,6
else
r600_disable_interrupt_state(rdev);
 
/* at this point everything should be setup correctly to enable master */
pci_set_master(rdev->pdev);
 
/* enable irqs */
r600_enable_interrupts(rdev);
 
2467,7 → 2461,7
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
u32 hdmi1, hdmi2;
u32 d1grph = 0, d2grph = 0;
 
if (!rdev->irq.installed) {
2482,7 → 2476,9
return 0;
}
 
hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2490,32 → 2486,26
if (ASIC_IS_DCE32(rdev)) {
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
} else {
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
} else {
hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
 
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
if (rdev->irq.sw_int) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
rdev->irq.pflip[0]) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
mode_int |= D1MODE_VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
atomic_read(&rdev->irq.pflip[1])) {
rdev->irq.pflip[1]) {
DRM_DEBUG("r600_irq_set: vblank 1\n");
mode_int |= D2MODE_VBLANK_INT_MASK;
}
2543,14 → 2533,18
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
if (rdev->irq.afmt[0]) {
DRM_DEBUG("r600_irq_set: hdmi 0\n");
hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
if (rdev->irq.hdmi[0]) {
DRM_DEBUG("r600_irq_set: hdmi 1\n");
hdmi1 |= R600_HDMI_INT_EN;
}
if (rdev->irq.afmt[1]) {
DRM_DEBUG("r600_irq_set: hdmi 0\n");
hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
if (rdev->irq.hdmi[1]) {
DRM_DEBUG("r600_irq_set: hdmi 2\n");
hdmi2 |= R600_HDMI_INT_EN;
}
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
 
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
2557,7 → 2551,9
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
2565,24 → 2561,18
if (ASIC_IS_DCE32(rdev)) {
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
} else {
WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
} else {
WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
 
return 0;
}
 
static void r600_irq_ack(struct radeon_device *rdev)
static inline void r600_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
 
2590,19 → 2580,10
rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
if (ASIC_IS_DCE32(rdev)) {
rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
} else {
rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
}
} else {
rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
}
rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
2668,37 → 2649,22
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
}
if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
}
} else {
if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
if (ASIC_IS_DCE3(rdev)) {
if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
}
if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
if (ASIC_IS_DCE3(rdev)) {
tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
} else {
tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
}
}
}
}
 
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
 
2760,8 → 2726,8
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
 
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
2771,15 → 2737,17
RREG32(IH_RB_WPTR);
 
wptr = r600_get_ih_wptr(rdev);
rptr = rdev->ih.rptr;
// DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
restart_ih:
/* is somebody else already processing irqs? */
if (atomic_xchg(&rdev->ih.lock, 1))
spin_lock_irqsave(&rdev->ih.lock, flags);
 
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
 
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
 
2786,6 → 2754,7
/* display interrupts */
r600_irq_ack(rdev);
 
rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
2894,39 → 2863,24
break;
}
break;
case 21: /* hdmi */
switch (src_data) {
case 4:
if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI0\n");
}
case 21: /* HDMI */
DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
// r600_audio_schedule_polling(rdev);
break;
case 5:
if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI1\n");
}
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
radeon_fence_process(rdev);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
radeon_fence_process(rdev);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
rdev->pm.gui_idle = true;
// wake_up(&rdev->irq.idle_queue);
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2937,15 → 2891,15
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
 
/* make sure wptr hasn't changed while processing */
wptr = r600_get_ih_wptr(rdev);
if (wptr != rptr)
if (wptr != rdev->ih.wptr)
goto restart_ih;
 
// if (queue_hotplug)
// schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_HANDLED;
}
 
2954,6 → 2908,30
*/
#if defined(CONFIG_DEBUG_FS)
 
static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
unsigned count, i, j;
 
radeon_ring_free_size(rdev);
count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
i = rdev->cp.rptr;
for (j = 0; j <= count; j++) {
seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
i = (i + 1) & rdev->cp.ptr_mask;
}
return 0;
}
 
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
2967,6 → 2945,7
 
static struct drm_info_list r600_mc_info_list[] = {
{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif
 
3128,8 → 3107,6
{
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
u32 mask;
int ret;
 
if (radeon_pcie_gen2 == 0)
return;
3148,21 → 3125,6
if (rdev->family <= CHIP_R600)
return;
 
ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
if (ret != 0)
return;
 
if (!(mask & DRM_PCIE_SPEED_50))
return;
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}
 
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
/* 55 nm r6xx asics */
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
3242,23 → 3204,3
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
 
/**
* r600_get_gpu_clock - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (R6xx-cayman).
* Returns the 64 bit clock counter snapshot.
*/
uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
{
uint64_t clock;
 
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
/drivers/video/drm/radeon/r600_blit_kms.c
23,20 → 23,30
*
*/
 
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"
 
#include "r600d.h"
#include "r600_blit_shaders.h"
#include "radeon_blit_common.h"
 
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2
 
#define FMT_8 0x1
#define FMT_5_6_5 0x8
#define FMT_8_8_8_8 0x1a
#define COLOR_8 0x1
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
 
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
 
44,44 → 54,42
if (h < 8)
h = 8;
 
cb_color_info = CB_FORMAT(format) |
CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
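/* as the expressions above show, pitch is programmed in units of
 * 8 pixels and slice in units of 64 pixels, each stored minus one */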
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
 
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
radeon_ring_write(ring, 2 << 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
radeon_ring_write(rdev, 2 << 0);
}
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (pitch << 0) | (slice << 10));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, cb_color_info);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, cb_color_info);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
}
 
/* emits 5dw */
90,7 → 98,6
u32 sync_type, u32 size,
u64 mc_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
 
if (size == 0xffffffff)
98,11 → 105,11
else
cp_coher_size = ((size + 255) >> 8);
 
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, sync_type);
radeon_ring_write(ring, cp_coher_size);
radeon_ring_write(ring, mc_addr >> 8);
radeon_ring_write(ring, 10); /* poll interval */
radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(rdev, sync_type);
radeon_ring_write(rdev, cp_coher_size);
radeon_ring_write(rdev, mc_addr >> 8);
radeon_ring_write(rdev, 10); /* poll interval */
}
 
/* emits 21dw + 1 surface sync = 26dw */
109,7 → 116,6
static void
set_shaders(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
u32 sq_pgm_resources;
 
118,35 → 124,35
 
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, sq_pgm_resources);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, sq_pgm_resources);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
 
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 2);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 2);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
 
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
156,24 → 162,22
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2;
 
sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
SQ_VTXC_STRIDE(16);
sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
#ifdef __BIG_ENDIAN
sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
sq_vtx_constant_word2 |= (2 << 30);
#endif
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(ring, 0x460);
radeon_ring_write(ring, gpu_addr & 0xffffffff);
radeon_ring_write(ring, 48 - 1);
radeon_ring_write(ring, sq_vtx_constant_word2);
radeon_ring_write(ring, 1 << 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(rdev, 0x460);
radeon_ring_write(rdev, gpu_addr & 0xffffffff);
radeon_ring_write(rdev, 48 - 1);
radeon_ring_write(rdev, sq_vtx_constant_word2);
radeon_ring_write(rdev, 1 << 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
 
if ((rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV620) ||
191,40 → 195,35
static void
set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
 
if (h < 1)
h = 1;
 
sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
S_038000_TEX_WIDTH(w - 1);
sq_tex_resource_word0 = (1 << 0) | (1 << 3);
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
((w - 1) << 19));
 
sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
sq_tex_resource_word1 = (format << 26);
sq_tex_resource_word1 |= ((h - 1) << 0);
 
sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
S_038010_DST_SEL_X(SQ_SEL_X) |
S_038010_DST_SEL_Y(SQ_SEL_Y) |
S_038010_DST_SEL_Z(SQ_SEL_Z) |
S_038010_DST_SEL_W(SQ_SEL_W);
sq_tex_resource_word4 = ((1 << 14) |
(0 << 16) |
(1 << 19) |
(2 << 22) |
(3 << 25));
 
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, sq_tex_resource_word0);
radeon_ring_write(ring, sq_tex_resource_word1);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, sq_tex_resource_word4);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, sq_tex_resource_word0);
radeon_ring_write(rdev, sq_tex_resource_word1);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, sq_tex_resource_word4);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
}
 
/* emits 12 */
232,21 → 231,20
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}
 
/* emits 10 */
253,24 → 251,23
static void
draw_auto(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, DI_PT_RECTLIST);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, DI_PT_RECTLIST);
 
radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
 
radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(ring, 1);
radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(rdev, 1);
 
radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(ring, 3);
radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(rdev, 3);
radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
 
}
 
278,7 → 275,6
static void
set_default_state(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
434,26 → 430,51
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(ring, dwords);
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
 
/* SQ config */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, sq_config);
radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
radeon_ring_write(ring, sq_thread_resource_mgmt);
radeon_ring_write(ring, sq_stack_resource_mgmt_1);
radeon_ring_write(ring, sq_stack_resource_mgmt_2);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, sq_config);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
radeon_ring_write(rdev, sq_thread_resource_mgmt);
radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
}
 
static inline uint32_t i2f(uint32_t input)
{
u32 result, i, exponent, fraction;
 
if ((input & 0x3fff) == 0)
result = 0; /* 0 is a special case */
else {
exponent = 140; /* exponent biased by 127; */
fraction = (input & 0x3fff) << 10; /* cheat and only
handle numbers below 2^15 */
for (i = 0; i < 14; i++) {
if (fraction & 0x800000)
break;
else {
fraction = fraction << 1; /* keep
shifting left until top bit = 1 */
exponent = exponent - 1;
}
}
result = exponent << 23 | (fraction & 0x7fffff); /* mask
off top bit; it is the implicit leading 1 */
}
return result;
}
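/* Worked example from the code above: i2f(1) takes fraction =
 * 1 << 10 = 0x400, shifts it left 13 times to reach 0x800000 while
 * exponent drops to 140 - 13 = 127, giving 127 << 23 = 0x3f800000,
 * the IEEE 754 encoding of 1.0f. */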
 
int r600_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
462,27 → 483,11
u32 packet2s[16];
int num_packet2s = 0;
 
rdev->r600_blit.primitives.set_render_target = set_render_target;
rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
rdev->r600_blit.primitives.set_shaders = set_shaders;
rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
rdev->r600_blit.primitives.set_scissors = set_scissors;
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;
 
rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
 
rdev->r600_blit.ring_size_per_loop = 76;
/* set_render_target emits 2 extra dwords on rv6xx */
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
rdev->r600_blit.ring_size_per_loop += 2;
 
rdev->r600_blit.max_dim = 8192;
 
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
 
if (rdev->family >= CHIP_RV770)
507,28 → 512,13
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
 
/* pin copy shader into vram if not already initialized */
if (rdev->r600_blit.shader_obj == NULL) {
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->r600_blit.shader_obj);
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
}
 
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
}
 
DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
obj_size,
rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
557,7 → 547,20
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
done:
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
 
return 0;
}
 
579,176 → 582,263
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
 
static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int *width, int *height, int max_dim)
{
unsigned max_pages;
unsigned pages = num_gpu_pages;
int w, h;

if (num_gpu_pages == 0) {
/* not supposed to be called with no pages, but just in case */
h = 0;
w = 0;
pages = 0;
WARN_ON(1);
} else {
int rect_order = 2;
h = RECT_UNIT_H;
while (num_gpu_pages / rect_order) {
h *= 2;
rect_order *= 4;
if (h >= max_dim) {
h = max_dim;
break;
}
}
max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
if (pages > max_pages)
pages = max_pages;
w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
w = (w / RECT_UNIT_W) * RECT_UNIT_W;
pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
BUG_ON(pages == 0);
}

DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);

/* return width and height only if the caller wants it */
if (height)
*height = h;
if (width)
*width = w;

return pages;
}
static int r600_vb_ib_get(struct radeon_device *rdev)
{
int r;
r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
if (r) {
DRM_ERROR("failed to get IB for vertex buffer\n");
return r;
}

rdev->r600_blit.vb_total = 64*1024;
rdev->r600_blit.vb_used = 0;
return 0;
}
static void r600_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
 
 
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
struct radeon_fence **fence, struct radeon_sa_bo **vb,
struct radeon_semaphore **sem)
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
int ring_size;
int num_loops = 0;
int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
int ring_size, line_size;
int max_size;
/* loops of emits 64 + fence emit possible */
int dwords_per_loop = 76, num_loops;
 
/* num loops */
while (num_gpu_pages) {
num_gpu_pages -=
r600_blit_create_rect(num_gpu_pages, NULL, NULL,
rdev->r600_blit.max_dim);
num_loops++;
}
 
/* 48 bytes for vertex per loop */
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
(num_loops*48)+256, 256, true);
if (r) {
r = r600_vb_ib_get(rdev);
if (r)
return r;
}
 
r = radeon_semaphore_create(rdev, sem);
if (r) {
radeon_sa_bo_free(rdev, vb, NULL);
return r;
}
/* set_render_target emits 2 extra dwords on rv6xx */
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
dwords_per_loop += 2;
 
/* 8 bpp vs 32 bpp for xfer unit */
if (size_bytes & 3)
line_size = 8192;
else
line_size = 8192*4;
 
max_size = 8192 * line_size;
 
/* major loops cover the max size transfer */
num_loops = ((size_bytes + max_size) / max_size);
/* minor loops cover the extra non aligned bits */
num_loops += ((size_bytes % line_size) ? 1 : 0);
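/* e.g. an aligned 24 KiB copy: line_size = 32768, so one major loop
 * plus one minor loop for the 24576 % 32768 remainder, num_loops = 2 */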
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
if (r) {
radeon_sa_bo_free(rdev, vb, NULL);
radeon_semaphore_free(rdev, sem, NULL);
/* set default + shaders */
ring_size += 40; /* shaders + def state */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 10; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
if (r)
return r;
}
 
if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
RADEON_RING_TYPE_GFX_INDEX);
radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
} else {
radeon_semaphore_free(rdev, sem, NULL);
}
 
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
set_default_state(rdev); /* 14 */
set_shaders(rdev); /* 26 */
return 0;
}
 
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return;
}
if (rdev->r600_blit.vb_ib)
r600_vb_ib_put(rdev);
 
radeon_ring_unlock_commit(rdev, ring);
radeon_sa_bo_free(rdev, &vb, *fence);
radeon_semaphore_free(rdev, &sem, *fence);
if (fence)
r = radeon_fence_emit(rdev, fence);
 
radeon_ring_unlock_commit(rdev);
}
 
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
unsigned num_gpu_pages,
struct radeon_sa_bo *vb)
int size_bytes)
{
int max_bytes;
u64 vb_gpu_addr;
u32 *vb_cpu_addr;
u32 *vb;
 
DRM_DEBUG("emitting copy %16llx %16llx %d\n",
src_gpu_addr, dst_gpu_addr, num_gpu_pages);
vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
size_bytes, rdev->r600_blit.vb_used);
vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
max_bytes = 8192;
 
while (num_gpu_pages) {
int w, h;
unsigned size_in_bytes;
unsigned pages_per_loop =
r600_blit_create_rect(num_gpu_pages, &w, &h,
rdev->r600_blit.max_dim);
while (size_bytes) {
int cur_size = size_bytes;
int src_x = src_gpu_addr & 255;
int dst_x = dst_gpu_addr & 255;
int h = 1;
src_gpu_addr = src_gpu_addr & ~255ULL;
dst_gpu_addr = dst_gpu_addr & ~255ULL;
 
size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
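/* with unaligned addresses the copy stays on a single line (h == 1)
 * and cur_size is clamped so neither src_x + cur_size nor
 * dst_x + cur_size runs past the 8192 byte window */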
 
vb_cpu_addr[0] = 0;
vb_cpu_addr[1] = 0;
vb_cpu_addr[2] = 0;
vb_cpu_addr[3] = 0;
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
// WARN_ON(1);
}
 
vb_cpu_addr[4] = 0;
vb_cpu_addr[5] = int2float(h);
vb_cpu_addr[6] = 0;
vb_cpu_addr[7] = int2float(h);
vb[0] = i2f(dst_x);
vb[1] = 0;
vb[2] = i2f(src_x);
vb[3] = 0;
 
vb_cpu_addr[8] = int2float(w);
vb_cpu_addr[9] = int2float(h);
vb_cpu_addr[10] = int2float(w);
vb_cpu_addr[11] = int2float(h);
vb[4] = i2f(dst_x);
vb[5] = i2f(h);
vb[6] = i2f(src_x);
vb[7] = i2f(h);
 
rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
w, h, w, src_gpu_addr, size_in_bytes);
rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
w, h, dst_gpu_addr);
rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
rdev->r600_blit.primitives.draw_auto(rdev);
rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
vb[8] = i2f(dst_x + cur_size);
vb[9] = i2f(h);
vb[10] = i2f(src_x + cur_size);
vb[11] = i2f(h);
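/* three vertices of four floats each: destination x/y position
 * followed by the source x/y texture coordinate (matching the 16 byte
 * stride in set_vtx_resource); the RECTLIST primitive lets the
 * hardware derive the fourth corner of the rectangle */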
 
/* src 9 */
set_tex_resource(rdev, FMT_8,
src_x + cur_size, h, src_x + cur_size,
src_gpu_addr);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
 
/* dst 23 */
set_render_target(rdev, COLOR_8,
dst_x + cur_size, h,
dst_gpu_addr);
 
/* scissors 12 */
set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
 
/* 14 */
vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
set_vtx_resource(rdev, vb_gpu_addr);
 
/* draw 10 */
draw_auto(rdev);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
size_in_bytes, dst_gpu_addr);
cur_size * h, dst_gpu_addr);
 
vb_cpu_addr += 12;
vb_gpu_addr += 4*12;
src_gpu_addr += size_in_bytes;
dst_gpu_addr += size_in_bytes;
num_gpu_pages -= pages_per_loop;
vb += 12;
rdev->r600_blit.vb_used += 12 * 4;
 
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
}
} else {
max_bytes = 8192 * 4;
 
while (size_bytes) {
int cur_size = size_bytes;
int src_x = (src_gpu_addr & 255);
int dst_x = (dst_gpu_addr & 255);
int h = 1;
src_gpu_addr = src_gpu_addr & ~255ULL;
dst_gpu_addr = dst_gpu_addr & ~255ULL;
 
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
 
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
// WARN_ON(1);
}
 
vb[0] = i2f(dst_x / 4);
vb[1] = 0;
vb[2] = i2f(src_x / 4);
vb[3] = 0;
 
vb[4] = i2f(dst_x / 4);
vb[5] = i2f(h);
vb[6] = i2f(src_x / 4);
vb[7] = i2f(h);
 
vb[8] = i2f((dst_x + cur_size) / 4);
vb[9] = i2f(h);
vb[10] = i2f((src_x + cur_size) / 4);
vb[11] = i2f(h);
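/* same vertex layout as the byte path, but here the surface is
 * treated as 32 bpp, so byte offsets are divided by 4 to get texels */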
 
/* src 9 */
set_tex_resource(rdev, FMT_8_8_8_8,
(src_x + cur_size) / 4,
h, (src_x + cur_size) / 4,
src_gpu_addr);
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
 
/* dst 23 */
set_render_target(rdev, COLOR_8_8_8_8,
(dst_x + cur_size) / 4, h,
dst_gpu_addr);
 
/* scissors 12 */
set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
 
/* Vertex buffer setup 14 */
vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
set_vtx_resource(rdev, vb_gpu_addr);
 
/* draw 10 */
draw_auto(rdev);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
cur_size * h, dst_gpu_addr);
 
/* 78 ring dwords per loop */
vb += 12;
rdev->r600_blit.vb_used += 12 * 4;
 
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
}
}
}
 
 
 
/drivers/video/drm/radeon/radeon.h
61,10 → 61,9
*/
 
#include <asm/atomic.h>
#include <linux/wait.h>
 
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/div64.h>
 
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
75,6 → 74,7
#include <pci.h>
 
#include <errno-base.h>
#include "drm_edid.h"
 
#include "radeon_family.h"
#include "radeon_mode.h"
82,6 → 82,8
 
#include <syscall.h>
 
extern unsigned long volatile jiffies;
 
/*
* Modules parameters.
*/
100,11 → 102,6
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;
 
 
 
typedef struct pm_message {
int event;
} pm_message_t;
117,8 → 114,53
int freq;
}videomode_t;
 
static inline uint8_t __raw_readb(const volatile void __iomem *addr)
{
return *(const volatile uint8_t __force *) addr;
}
 
static inline uint16_t __raw_readw(const volatile void __iomem *addr)
{
return *(const volatile uint16_t __force *) addr;
}
 
static inline uint32_t __raw_readl(const volatile void __iomem *addr)
{
return *(const volatile uint32_t __force *) addr;
}
 
#define readb __raw_readb
#define readw __raw_readw
#define readl __raw_readl
 
 
 
static inline void __raw_writeb(uint8_t b, volatile void __iomem *addr)
{
*(volatile uint8_t __force *) addr = b;
}
 
static inline void __raw_writew(uint16_t b, volatile void __iomem *addr)
{
*(volatile uint16_t __force *) addr = b;
}
 
static inline void __raw_writel(uint32_t b, volatile void __iomem *addr)
{
*(volatile uint32_t __force *) addr = b;
}
 
static inline void __raw_writeq(__u64 b, volatile void __iomem *addr)
{
*(volatile __u64 *)addr = b;
}
 
#define writeb __raw_writeb
#define writew __raw_writew
#define writel __raw_writel
#define writeq __raw_writeq
 
 
static inline u32 ioread32(const volatile void __iomem *addr)
{
return in32((u32)addr);
129,11 → 171,11
out32((u32)addr, b);
}
 
//struct __wait_queue_head {
// spinlock_t lock;
// struct list_head task_list;
//};
//typedef struct __wait_queue_head wait_queue_head_t;
struct __wait_queue_head {
spinlock_t lock;
struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
 
 
/*
144,29 → 186,10
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_COMPONENTS 32
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
 
/* max number of rings */
#define RADEON_NUM_RINGS 3
 
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
 
/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX 0
 
/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX 1
#define CAYMAN_RING_TYPE_CP2_INDEX 2
 
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
 
/*
* Errata workarounds.
*/
183,8 → 206,24
/*
* BIOS.
*/
#define ATRM_BIOS_PAGE 4096
 
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_atrm_supported(struct pci_dev *pdev);
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
#else
static inline bool radeon_atrm_supported(struct pci_dev *pdev)
{
return false;
}
 
static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
return -EINVAL;
}
#endif
bool radeon_get_bios(struct radeon_device *rdev);
 
 
/*
* Dummy page
*/
224,15 → 263,12
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
extern int si_get_temp(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split);
 
/*
* Fences.
239,12 → 275,15
*/
struct radeon_fence_driver {
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
unsigned long last_activity;
atomic_t seq;
uint32_t last_seq;
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
struct list_head emited;
struct list_head signaled;
bool initialized;
};
 
251,65 → 290,26
struct radeon_fence {
struct radeon_device *rdev;
struct kref kref;
struct list_head list;
/* protected by radeon_fence.lock */
uint64_t seq;
/* RB, DMA, etc. */
unsigned ring;
uint32_t seq;
bool emited;
bool signaled;
evhandle_t evnt;
};
 
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
int radeon_fence_wait_next(struct radeon_device *rdev);
int radeon_fence_wait_last(struct radeon_device *rdev);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
struct radeon_fence *b)
{
if (!a) {
return b;
}
 
if (!b) {
return a;
}
 
BUG_ON(a->ring != b->ring);
 
if (a->seq > b->seq) {
return a;
} else {
return b;
}
}
 
static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
struct radeon_fence *b)
{
if (!a) {
return false;
}
 
if (!b) {
return true;
}
 
BUG_ON(a->ring != b->ring);
 
return a->seq < b->seq;
}
 
/*
* Tiling registers
*/
330,24 → 330,6
bool initialized;
};
 
/* bo virtual address in a specific vm */
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
uint64_t soffset;
uint64_t eoffset;
uint32_t flags;
bool valid;
unsigned ref_count;
 
/* protected by vm mutex */
struct list_head vm_list;
 
/* constant after initialization */
struct radeon_vm *vm;
struct radeon_bo *bo;
};
 
struct radeon_bo {
/* Protected by gem.mutex */
struct list_head list;
363,15 → 345,10
u32 tiling_flags;
u32 pitch;
int surface_reg;
/* list of all virtual address to which this bo
* is associated to
*/
struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
u32 domain;
int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
383,53 → 360,6
u32 tiling_flags;
};
 
/* sub-allocation manager, it has to be protected by another lock.
 * By conception this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their
 * own locking.
 *
 * The principle is simple: we keep a list of sub allocations in
 * offset order (first entry has offset == 0, last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end: total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset+object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects on the
 * same alignment).
 */
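/* Example of the end-of-buffer check above: in a 64 KiB manager whose
 * last object ends at byte 61440, a new 4 KiB allocation still fits at
 * the end, since 65536 - 61440 >= 4096; a larger request has to wait
 * on the existing sub objects. */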
struct radeon_sa_manager {
wait_queue_head_t wq;
struct radeon_bo *bo;
struct list_head *hole;
struct list_head flist[RADEON_NUM_RINGS];
struct list_head olist;
unsigned size;
uint64_t gpu_addr;
void *cpu_ptr;
uint32_t domain;
};
 
struct radeon_sa_bo;
 
/* sub-allocation buffer */
struct radeon_sa_bo {
struct list_head olist;
struct list_head flist;
struct radeon_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
struct radeon_fence *fence;
};
 
/*
* GEM objects.
*/
444,6 → 374,9
int alignment, int initial_domain,
bool discardable, bool kernel,
struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
 
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
456,47 → 389,36
uint32_t handle);
 
/*
* Semaphores.
* GART structures, functions & helpers
*/
/* everything here is constant */
struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
struct radeon_mc;
 
struct radeon_gart_table_ram {
volatile uint32_t *ptr;
};
 
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int signaler, int waiter);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
struct radeon_gart_table_vram {
struct radeon_bo *robj;
volatile uint32_t *ptr;
};
 
/*
* GART structures, functions & helpers
*/
struct radeon_mc;
union radeon_gart_table {
struct radeon_gart_table_ram ram;
struct radeon_gart_table_vram vram;
};
 
#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
 
struct radeon_gart {
dma_addr_t table_addr;
struct radeon_bo *robj;
void *ptr;
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
union radeon_gart_table table;
struct page **pages;
dma_addr_t *pages_addr;
bool *ttm_alloced;
bool ready;
};
 
504,16 → 426,12
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, u32 *pagelist,
dma_addr_t *dma_addr);
void radeon_gart_restore(struct radeon_device *rdev);
int pages, u32_t *pagelist);
 
 
/*
562,7 → 480,6
*/
struct r500_irq_stat_regs {
u32 disp_int;
u32 hdmi0_status;
};
 
struct r600_irq_stat_regs {
571,8 → 488,6
u32 disp_int_cont2;
u32 d1grph_int;
u32 d2grph_int;
u32 hdmi0_status;
u32 hdmi1_status;
};
 
struct evergreen_irq_stat_regs {
588,12 → 503,6
u32 d4grph_int;
u32 d5grph_int;
u32 d6grph_int;
u32 afmt_status1;
u32 afmt_status2;
u32 afmt_status3;
u32 afmt_status4;
u32 afmt_status5;
u32 afmt_status6;
};
 
union radeon_irq_stat_regs {
602,133 → 511,77
struct evergreen_irq_stat_regs evergreen;
};
 
#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
#define RADEON_MAX_AFMT_BLOCKS 6
 
struct radeon_irq {
bool installed;
spinlock_t lock;
atomic_t ring_int[RADEON_NUM_RINGS];
bool crtc_vblank_int[RADEON_MAX_CRTCS];
atomic_t pflip[RADEON_MAX_CRTCS];
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[6];
bool pflip[6];
wait_queue_head_t vblank_queue;
bool hpd[RADEON_MAX_HPD_PINS];
bool afmt[RADEON_MAX_AFMT_BLOCKS];
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
bool gui_idle;
bool gui_idle_acked;
wait_queue_head_t idle_queue;
/* FIXME: use defines for max HDMI blocks */
bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
union radeon_irq_stat_regs stat_regs;
spinlock_t pflip_lock[6];
int pflip_refcount[6];
};
 
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
 
/*
* CP & rings.
* CP & ring.
*/
 
struct radeon_ib {
struct radeon_sa_bo *sa_bo;
uint32_t length_dw;
struct list_head list;
unsigned idx;
uint64_t gpu_addr;
struct radeon_fence *fence;
uint32_t *ptr;
int ring;
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
uint32_t length_dw;
bool free;
};
 
struct radeon_ring {
/*
* locking -
* mutex protects scheduled_ibs, ready, alloc_bm
*/
struct radeon_ib_pool {
struct mutex mutex;
struct radeon_bo *robj;
struct list_head bogus_ib;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
unsigned head_id;
};
 
struct radeon_cp {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned rptr_offs;
unsigned rptr_reg;
unsigned rptr_save_reg;
u64 next_rptr_gpu_addr;
volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
unsigned long last_activity;
unsigned last_rptr;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
struct mutex mutex;
bool ready;
u32 ptr_reg_shift;
u32 ptr_reg_mask;
u32 nop;
u32 idx;
};
 
/*
* VM
*/
 
/* maximum number of VMIDs */
#define RADEON_NUM_VM 16
 
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, 9 bits in the page
* table and the remaining 19 bits are in the page directory */
#define RADEON_VM_BLOCK_SIZE 9
 
/* number of entries in page table */
#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
 
struct radeon_vm {
struct list_head list;
struct list_head va;
unsigned id;
 
/* contains the page directory */
struct radeon_sa_bo *page_directory;
uint64_t pd_gpu_addr;
 
/* array of page tables, one for each page directory entry */
struct radeon_sa_bo **page_tables;
 
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
/* last flush or NULL if we still need to flush */
struct radeon_fence *last_flush;
};
 
struct radeon_vm_manager {
struct mutex lock;
struct list_head lru_vm;
struct radeon_fence *active[RADEON_NUM_VM];
struct radeon_sa_manager sa_manager;
uint32_t max_pfn;
/* number of VMIDs */
unsigned nvm;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */
bool enabled;
};
 
/*
* file private structure
*/
struct radeon_fpriv {
struct radeon_vm vm;
};
 
/*
* R6xx+ IH ring
*/
struct r600_ih {
735,85 → 588,43
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
uint64_t gpu_addr;
uint32_t ptr_mask;
atomic_t lock;
spinlock_t lock;
bool enabled;
};
 
struct r600_blit_cp_primitives {
void (*set_render_target)(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr);
void (*cp_set_surface_sync)(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr);
void (*set_shaders)(struct radeon_device *rdev);
void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
void (*set_tex_resource)(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size);
void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
int x2, int y2);
void (*draw_auto)(struct radeon_device *rdev);
void (*set_default_state)(struct radeon_device *rdev);
};
 
struct r600_blit {
struct mutex mutex;
struct radeon_bo *shader_obj;
struct r600_blit_cp_primitives primitives;
int max_dim;
int ring_size_common;
int ring_size_per_loop;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
u32 state_offset;
u32 state_len;
u32 vb_used, vb_total;
struct radeon_ib *vb_ib;
};
 
/*
* SI RLC stuff
*/
struct si_rlc {
/* for power gating */
struct radeon_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
/* for clear state */
struct radeon_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
};
 
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
struct radeon_ring *ring);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data);
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev);
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
void radeon_ring_fini(struct radeon_device *rdev);
 
 
/*
856,21 → 667,41
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
int chunk_flags_idx;
int chunk_const_ib_idx;
struct radeon_ib ib;
struct radeon_ib const_ib;
struct radeon_ib *ib;
void *track;
unsigned family;
int parser_error;
u32 cs_flags;
u32 ring;
s32 priority;
};
 
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
 
 
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
u32 pg_idx, pg_offset;
u32 idx_value = 0;
int new_page;
 
pg_idx = (idx * 4) / PAGE_SIZE;
pg_offset = (idx * 4) % PAGE_SIZE;
 
if (ibc->kpage_idx[0] == pg_idx)
return ibc->kpage[0][pg_offset/4];
if (ibc->kpage_idx[1] == pg_idx)
return ibc->kpage[1][pg_offset/4];
 
new_page = radeon_cs_update_pages(p, pg_idx);
if (new_page < 0) {
p->parser_error = new_page;
return 0;
}
 
idx_value = ibc->kpage[new_page][pg_offset/4];
return idx_value;
}
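/* The two kpage slots cache the most recently mapped pages of the
 * command stream; radeon_cs_update_pages() maps the missing page and
 * returns the slot it used, so only out-of-cache reads pay a remap. */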
 
struct radeon_cs_packet {
unsigned idx;
unsigned type;
908,7 → 739,6
};
 
#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_RING0_NEXT_RPTR 256
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
1001,7 → 831,6
THERMAL_TYPE_EVERGREEN,
THERMAL_TYPE_SUMO,
THERMAL_TYPE_NI,
THERMAL_TYPE_SI,
};
 
struct radeon_voltage {
1039,7 → 868,8
 
struct radeon_power_state {
enum radeon_pm_state_type type;
struct radeon_pm_clock_info *clock_info;
/* XXX: use a define for num clock modes */
struct radeon_pm_clock_info clock_info[8];
/* number of valid clock modes in this power state */
int num_clock_modes;
struct radeon_pm_clock_info *default_clock_mode;
1057,12 → 887,11
 
struct radeon_pm {
struct mutex mutex;
/* write locked while reprogramming mclk */
struct rw_semaphore mclk_lock;
u32 active_crtcs;
int active_crtc_count;
int req_vblank;
bool vblank_sync;
bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
1110,17 → 939,6
struct device *int_hwmon_dev;
};
 
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
 
struct r600_audio {
int channels;
int rate;
int bits_per_sample;
u8 status_bits;
u8 category_code;
};
/*
* ASIC specific functions.
*/
1130,108 → 948,37
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
bool (*gpu_is_lockup)(struct radeon_device *rdev);
int (*asic_reset)(struct radeon_device *rdev);
/* ioctl hw specific callback. Some hw might want to perform a special
 * operation on specific ioctls. For instance on wait idle some hw
 * might want to perform an HDP flush through MMIO as it seems that
 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
 * through the ring.
 */
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
/* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
int (*mc_wait_for_idle)(struct radeon_device *rdev);
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
 
u32 pt_ring_index;
void (*set_page)(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
} vm;
/* ring specific callbacks */
struct {
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
void (*cp_fini)(struct radeon_device *rdev);
void (*cp_disable)(struct radeon_device *rdev);
void (*cp_commit)(struct radeon_device *rdev);
void (*ring_start)(struct radeon_device *rdev);
int (*ring_test)(struct radeon_device *rdev);
void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
int (*irq_set)(struct radeon_device *rdev);
int (*irq_process)(struct radeon_device *rdev);
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
int (*cs_parse)(struct radeon_cs_parser *p);
void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
} ring[RADEON_NUM_RINGS];
/* irqs */
struct {
int (*set)(struct radeon_device *rdev);
int (*process)(struct radeon_device *rdev);
} irq;
/* displays */
struct {
/* display watermarks */
void (*bandwidth_update)(struct radeon_device *rdev);
/* get frame count */
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
/* wait for vblank */
void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
/* set backlight level */
void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
/* get backlight level */
u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
} display;
/* copy functions for bo handling */
struct {
int (*blit)(struct radeon_device *rdev,
int (*copy_blit)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 blit_ring_index;
int (*dma)(struct radeon_device *rdev,
unsigned num_pages,
struct radeon_fence *fence);
int (*copy_dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 dma_ring_index;
/* method used for bo copy */
unsigned num_pages,
struct radeon_fence *fence);
int (*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
/* surfaces */
struct {
int (*set_reg)(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void (*clear_reg)(struct radeon_device *rdev, int reg);
} surface;
/* hotplug detect */
struct {
void (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
} hpd;
/* power management */
struct {
void (*misc)(struct radeon_device *rdev);
void (*prepare)(struct radeon_device *rdev);
void (*finish)(struct radeon_device *rdev);
void (*init_profile)(struct radeon_device *rdev);
void (*get_dynpm_state)(struct radeon_device *rdev);
unsigned num_pages,
struct radeon_fence *fence);
uint32_t (*get_engine_clock)(struct radeon_device *rdev);
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
uint32_t (*get_memory_clock)(struct radeon_device *rdev);
1239,22 → 986,48
int (*get_pcie_lanes)(struct radeon_device *rdev);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
} pm;
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
/* ioctl hw specific callback. Some hw might want to perform special
* operations on a specific ioctl. For instance, on wait idle some hw
* might want to perform an HDP flush through MMIO, as it seems that
* some R6XX/R7XX hw doesn't take HDP flush into account if programmed
* through ring.
*/
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
bool (*gui_idle)(struct radeon_device *rdev);
/* power management */
void (*pm_misc)(struct radeon_device *rdev);
void (*pm_prepare)(struct radeon_device *rdev);
void (*pm_finish)(struct radeon_device *rdev);
void (*pm_init_profile)(struct radeon_device *rdev);
void (*pm_get_dynpm_state)(struct radeon_device *rdev);
/* pageflipping */
struct {
void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
void (*post_page_flip)(struct radeon_device *rdev, int crtc);
} pflip;
};
 
/*
* Asic structures
*/
struct r100_gpu_lockup {
unsigned long last_jiffies;
u32 last_cp_rptr;
};
 
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
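
/* A hedged sketch (not in this header) of how the lockup tracker above is
* consumed; it mirrors the r100_gpu_cp_is_lockup() logic: remember the CP
* read pointer and a timestamp, and declare a lockup only if the pointer
* has not advanced for a long stall (~10 seconds in the r100 code).
*/
static inline bool example_cp_is_lockup(struct r100_gpu_lockup *lockup,
					struct radeon_cp *cp)
{
	if (cp->rptr != lockup->last_cp_rptr) {
		/* the ring made progress: resample and report no lockup */
		lockup->last_cp_rptr = cp->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	/* no progress; only a long stall counts as a hang */
	return time_after(jiffies, lockup->last_jiffies + 10 * HZ);
}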
 
struct r300_asic {
1262,6 → 1035,7
unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
 
struct r600_asic {
1283,6 → 1057,7
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
struct r100_gpu_lockup lockup;
};
 
struct rv770_asic {
1308,6 → 1083,7
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
struct r100_gpu_lockup lockup;
};
 
struct evergreen_asic {
1334,6 → 1110,7
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
struct r100_gpu_lockup lockup;
};
 
struct cayman_asic {
1372,37 → 1149,9
unsigned multi_gpu_tile_size;
 
unsigned tile_config;
struct r100_gpu_lockup lockup;
};
 
struct si_asic {
unsigned max_shader_engines;
unsigned max_tile_pipes;
unsigned max_cu_per_sh;
unsigned max_sh_per_se;
unsigned max_backends_per_se;
unsigned max_texture_channel_caches;
unsigned max_gprs;
unsigned max_gs_threads;
unsigned max_hw_contexts;
unsigned sc_prim_fifo_size_frontend;
unsigned sc_prim_fifo_size_backend;
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_size;
 
unsigned num_tile_pipes;
unsigned num_backends_per_se;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
unsigned mem_max_burst_length_bytes;
unsigned mem_row_size_in_kb;
unsigned shader_engine_tile_size;
unsigned num_gpus;
unsigned multi_gpu_tile_size;
 
unsigned tile_config;
};
 
union radeon_asic_config {
struct r300_asic r300;
struct r100_asic r100;
1410,7 → 1159,6
struct rv770_asic rv770;
struct evergreen_asic evergreen;
struct cayman_asic cayman;
struct si_asic si;
};
 
/*
1421,14 → 1169,12
 
 
 
/* VRAM scratch page for HDP bug, default vram page */
struct r600_vram_scratch {
/* VRAM scratch page for HDP bug */
struct r700_vram_scratch {
struct radeon_bo *robj;
volatile uint32_t *ptr;
u64 gpu_addr;
};
 
 
/*
* Core structure, functions and helpers.
*/
1439,7 → 1185,6
struct device *dev;
struct drm_device *ddev;
struct pci_dev *pdev;
struct rw_semaphore exclusive_lock;
/* ASIC */
union radeon_asic_config config;
enum radeon_family family;
1457,7 → 1202,7
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
void __iomem *rmmio;
void *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
radeon_rreg_t pll_rreg;
1474,19 → 1219,21
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
struct radeon_sa_manager ring_tmp_bo;
struct radeon_fence_driver fence_drv;
struct radeon_cp cp;
/* cayman compute rings */
struct radeon_cp cp1;
struct radeon_cp cp2;
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
struct radeon_pm pm;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
struct mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool gpu_lockup;
bool shutdown;
bool suspend;
bool need_dma32;
1496,33 → 1243,28
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
struct r600_blit r600_blit;
struct r600_vram_scratch vram_scratch;
struct r600_blit r600_video;
struct r700_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct si_rlc rlc;
// struct work_struct hotplug_work;
// struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
struct mutex vram_mutex;
 
/* audio stuff */
bool audio_enabled;
// struct r600_audio audio_status; /* audio stuff */
// struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
// struct drm_file *hyperz_filp;
// struct drm_file *cmask_filp;
// struct timer_list audio_timer;
int audio_channels;
int audio_rate;
int audio_bits_per_sample;
uint8_t audio_status_bits;
uint8_t audio_category_code;
 
 
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
/* debugfs */
// struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
unsigned debugfs_count;
/* virtual memory */
struct radeon_vm_manager vm_manager;
struct mutex gpu_clock_mutex;
/* ACPI interface */
// struct radeon_atif atif;
// struct radeon_atcs atcs;
};
 
int radeon_device_init(struct radeon_device *rdev,
1532,11 → 1274,46
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
}
}
 
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
if (reg < rdev->rmmio_size)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
}
}
 
static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
return ioread32(rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
return ioread32(rdev->rio_mem + RADEON_MM_DATA);
}
}
 
static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
if (reg < rdev->rio_mem_size)
iowrite32(v, rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
}
}
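
/* A minimal usage sketch for the accessors above, with a hypothetical
* register offset and bit (names illustrative only): offsets below
* rmmio_size are read/written directly, anything beyond goes transparently
* through the RADEON_MM_INDEX/RADEON_MM_DATA window.
*/
static inline void example_reg_set_bit(struct radeon_device *rdev,
				       u32 reg, u32 bit)
{
	u32 tmp = r100_mm_rreg(rdev, reg);	/* direct or indexed read */
	r100_mm_wreg(rdev, reg, tmp | bit);	/* write back the same way */
}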
 
/*
* Cast helper
*/
1545,10 → 1322,10
/*
* Registers read & write functions.
*/
#define RREG8(reg) readb((rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
1640,9 → 1417,6
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
(rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
(rdev->flags & RADEON_IS_IGP))
 
/*
* BIOS helpers.
1660,19 → 1434,20
/*
* RING helpers.
*/
#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
{
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
#if DRM_DEBUG_CODE
if (rdev->cp.count_dw <= 0) {
DRM_ERROR("radeon: writting more dword to ring than expected !\n");
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
rdev->cp.ring[rdev->cp.wptr++] = v;
rdev->cp.wptr &= rdev->cp.ptr_mask;
rdev->cp.count_dw--;
rdev->cp.ring_free_dw--;
}
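
/* Hedged usage sketch for radeon_ring_write(): command emission is always
* bracketed by a lock that reserves the dwords and a commit that publishes
* the new write pointer to the CP. Shown against the older single-ring API
* (rdev->cp); the newer revision passes an explicit struct radeon_ring.
*/
static int example_emit_reg_write(struct radeon_device *rdev, u32 reg, u32 val)
{
	int r = radeon_ring_lock(rdev, 2);	/* reserve two dwords */
	if (r)
		return r;
	radeon_ring_write(rdev, PACKET0(reg, 0));	/* register packet header */
	radeon_ring_write(rdev, val);			/* payload */
	radeon_ring_unlock_commit(rdev);	/* bump wptr, kick the CP */
	return 0;
}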
 
 
/*
* ASICs macro.
*/
1680,64 → 1455,53
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
 
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
1761,91 → 1525,12
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
/*
* vm
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo);
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va);
 
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
 
/*
* R600 vram scratch functions
*/
int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);
 
/*
* r600 cs checking helper
*/
unsigned r600_mip_minify(unsigned size, unsigned level);
bool r600_fmt_is_valid_color(u32 format);
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
int r600_fmt_get_blocksize(u32 format);
int r600_fmt_get_nblocksx(u32 format, u32 w);
int r600_fmt_get_nblocksy(u32 format, u32 h);
 
/*
* r600 functions used by radeon_encoder.c
*/
struct radeon_hdmi_acr {
u32 clock;
 
int n_32khz;
int cts_32khz;
 
int n_44_1khz;
int cts_44_1khz;
 
int n_48khz;
int cts_48khz;
 
};
 
extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
 
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
u32 total_max_rb_num,
u32 enabled_rb_mask);
 
/*
* evergreen functions used by radeon_encoder.c
*/
 
extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
 
1852,10 → 1537,8
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
extern void radeon_acpi_fini(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
 
#include "radeon_object.h"
1872,4 → 1555,22
 
 
 
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
 
/*
* The first word is the work queue pointer and the flags rolled into
* one
*/
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
 
struct work_struct {
atomic_long_t data;
#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
struct list_head entry;
work_func_t func;
};
 
#endif
/drivers/video/drm/radeon/radeon_device.c
26,7 → 26,7
* Jerome Glisse
*/
//#include <linux/console.h>
#include <linux/slab.h>
 
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
55,9 → 55,7
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_disp_priority = 0;
int radeon_lockup_timeout = 10000;
 
 
int irq_override = 0;
 
 
67,7 → 65,7
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct radeon_device *rdev, videomode_t *mode);
 
int get_modes(videomode_t *mode, u32_t *count);
int get_modes(videomode_t *mode, int *count);
int set_user_mode(videomode_t *mode);
int r100_2D_test(struct radeon_device *rdev);
 
134,19 → 132,11
"TURKS",
"CAICOS",
"CAYMAN",
"ARUBA",
"TAHITI",
"PITCAIRN",
"VERDE",
"LAST",
};
 
/**
* radeon_surface_init - Clear GPU surface registers.
*
* @rdev: radeon_device pointer
*
* Clear GPU surface registers (r1xx-r5xx).
/*
* Clear GPU surface registers.
*/
void radeon_surface_init(struct radeon_device *rdev)
{
165,13 → 155,6
/*
* GPU scratch registers helpers function.
*/
/**
* radeon_scratch_init - Init scratch register driver information.
*
* @rdev: radeon_device pointer
*
* Init CP scratch register driver information (r1xx-r5xx)
*/
void radeon_scratch_init(struct radeon_device *rdev)
{
int i;
189,15 → 172,6
}
}
 
/**
* radeon_scratch_get - Allocate a scratch register
*
* @rdev: radeon_device pointer
* @reg: scratch register mmio offset
*
* Allocate a CP scratch register for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
int i;
212,14 → 186,6
return -EINVAL;
}
 
/**
* radeon_scratch_free - Free a scratch register
*
* @rdev: radeon_device pointer
* @reg: scratch register mmio offset
*
* Free a CP scratch register allocated for use by the driver (all asics)
*/
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
int i;
232,20 → 198,6
}
}
 
/*
* radeon_wb_*()
* Writeback is the method by which the GPU updates special pages
* in memory with the status of certain GPU events (fences, ring pointers,
* etc.).
*/
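
/* Sketch of the pattern writeback enables (simplified; field names follow
* this driver, the offset handling is illustrative): a ring read pointer
* can be fetched from the writeback page the GPU keeps current in system
* memory instead of a slow MMIO read.
*/
static inline u32 example_get_rptr(struct radeon_device *rdev,
				   u32 rptr_offs, u32 rptr_reg)
{
	if (rdev->wb.enabled)
		return le32_to_cpu(rdev->wb.wb[rptr_offs / 4]);
	return RREG32(rptr_reg);	/* fallback when writeback is off */
}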
 
/**
* radeon_wb_disable - Disable Writeback
*
* @rdev: radeon_device pointer
*
* Disables Writeback (all asics). Used for suspend.
*/
void radeon_wb_disable(struct radeon_device *rdev)
{
int r;
261,14 → 213,6
rdev->wb.enabled = false;
}
 
/**
* radeon_wb_fini - Disable Writeback and free memory
*
* @rdev: radeon_device pointer
*
* Disables Writeback and frees the Writeback memory (all asics).
* Used at driver shutdown.
*/
void radeon_wb_fini(struct radeon_device *rdev)
{
radeon_wb_disable(rdev);
279,15 → 223,6
}
}
 
/**
* radeon_wb_init - Init Writeback driver info and allocate memory
*
* @rdev: radeon_device pointer
*
* Initializes Writeback and allocates the Writeback memory (all asics).
* Used at driver startup.
* Returns 0 on success or an -error on failure.
*/
int radeon_wb_init(struct radeon_device *rdev)
{
int r;
294,7 → 229,7
 
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
326,25 → 261,21
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
if (radeon_no_wb == 1) {
if (radeon_no_wb == 1)
rdev->wb.enabled = false;
} else {
if (rdev->flags & RADEON_IS_AGP) {
else {
/* often unreliable on AGP */
rdev->wb.enabled = false;
} else if (rdev->family < CHIP_R300) {
/* often unreliable on pre-r300 */
rdev->wb.enabled = false;
} else {
// if (rdev->flags & RADEON_IS_AGP) {
// rdev->wb.enabled = false;
// } else {
rdev->wb.enabled = true;
/* event_write fences are only available on r600+ */
if (rdev->family >= CHIP_R600) {
if (rdev->family >= CHIP_R600)
rdev->wb.use_event = true;
// }
}
}
}
/* always use writeback/events on NI, APUs */
if (rdev->family >= CHIP_PALM) {
/* always use writeback/events on NI */
if (ASIC_IS_DCE5(rdev)) {
rdev->wb.enabled = true;
rdev->wb.use_event = true;
}
397,8 → 328,6
*/
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
uint64_t limit = (uint64_t)radeon_vram_limit << 20;
 
mc->vram_start = base;
if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
412,8 → 341,6
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
if (limit && limit < mc->real_vram_size)
mc->real_vram_size = limit;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
458,15 → 385,6
/*
* GPU helpers function.
*/
/**
* radeon_card_posted - check if the hw has already been initialized
*
* @rdev: radeon_device pointer
*
* Check if the asic has been initialized (all asics).
* Used at driver startup.
* Returns true if initialized or false if not.
*/
bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
513,14 → 431,6
 
}
 
/**
* radeon_update_bandwidth_info - update display bandwidth params
*
* @rdev: radeon_device pointer
*
* Used when sclk/mclk are switched or display modes are set.
* params are used to calculate display watermarks (all asics)
*/
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
541,15 → 451,6
}
}
 
/**
* radeon_boot_test_post_card - check and possibly initialize the hw
*
* @rdev: radeon_device pointer
*
* Check if the asic is initialized and if not, attempt to initialize
* it (all asics).
* Returns true if initialized or false if not.
*/
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
if (radeon_card_posted(rdev))
568,24 → 469,14
}
}
 
/**
* radeon_dummy_page_init - init dummy page used by the driver
*
* @rdev: radeon_device pointer
*
* Allocate the dummy page used by the driver (all asics).
* This dummy page is used by the driver as a filler for gart entries
* when pages are taken out of the GART.
* Returns 0 on success, -ENOMEM on failure.
*/
int radeon_dummy_page_init(struct radeon_device *rdev)
{
if (rdev->dummy_page.page)
return 0;
rdev->dummy_page.page = (void*)AllocPage();
rdev->dummy_page.page = AllocPage();
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
rdev->dummy_page.addr = MapIoMem((addr_t)rdev->dummy_page.page, 4096, 3);
rdev->dummy_page.addr = MapIoMem(rdev->dummy_page.page, 4096, 5);
if (!rdev->dummy_page.addr) {
// __free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
594,40 → 485,16
return 0;
}
 
/**
* radeon_dummy_page_fini - free dummy page used by the driver
*
* @rdev: radeon_device pointer
*
* Frees the dummy page used by the driver (all asics).
*/
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
if (rdev->dummy_page.page == NULL)
return;
KernelFree((void*)rdev->dummy_page.addr);
KernelFree(rdev->dummy_page.addr);
rdev->dummy_page.page = NULL;
}
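
/* Hedged sketch of what the dummy page is for (simplified from the GART
* teardown path): entries being unbound are pointed at dummy_page.addr so
* a stray GPU access hits a harmless page instead of freed memory.
*/
static void example_gart_unbind_range(struct radeon_device *rdev,
				      unsigned first, unsigned count)
{
	unsigned i;

	for (i = first; i < first + count; i++)
		radeon_gart_set_page(rdev, i, rdev->dummy_page.addr);
	radeon_gart_tlb_flush(rdev);	/* make the GPU see the new entries */
}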
 
 
/* ATOM accessor methods */
/*
* ATOM is an interpreted byte code stored in tables in the vbios. The
* driver registers callbacks to access registers and the interpreter
* in the driver parses the tables and executes them to program specific
* actions (set display modes, asic init, etc.). See radeon_atombios.c,
* atombios.h, and atom.c
*/
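
/* Minimal sketch of how the cail_* accessors below get wired into the ATOM
* interpreter (this mirrors radeon_atombios_init() further down; only the
* plain MMIO pair is shown):
*
*	atom_card_info->dev = rdev->ddev;
*	atom_card_info->reg_read = cail_reg_read;
*	atom_card_info->reg_write = cail_reg_write;
*	// pll_*, mc_* and ioreg_* callbacks are hooked up the same way,
*	// then atom_parse() interprets the vbios tables through them.
*/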
 
/**
* cail_pll_read - read PLL register
*
* @info: atom card_info pointer
* @reg: PLL register offset
*
* Provides a PLL register accessor for the atom interpreter (r4xx+).
* Returns the value of the PLL register.
*/
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
637,15 → 504,6
return r;
}
 
/**
* cail_pll_write - write PLL register
*
* @info: atom card_info pointer
* @reg: PLL register offset
* @val: value to write to the pll register
*
* Provides a PLL register accessor for the atom interpreter (r4xx+).
*/
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
653,15 → 511,6
rdev->pll_wreg(rdev, reg, val);
}
 
/**
* cail_mc_read - read MC (Memory Controller) register
*
* @info: atom card_info pointer
* @reg: MC register offset
*
* Provides an MC register accessor for the atom interpreter (r4xx+).
* Returns the value of the MC register.
*/
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
671,15 → 520,6
return r;
}
 
/**
* cail_mc_write - write MC (Memory Controller) register
*
* @info: atom card_info pointer
* @reg: MC register offset
* @val: value to write to the MC register
*
* Provides an MC register accessor for the atom interpreter (r4xx+).
*/
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
687,15 → 527,6
rdev->mc_wreg(rdev, reg, val);
}
 
/**
* cail_reg_write - write MMIO register
*
* @info: atom card_info pointer
* @reg: MMIO register offset
* @val: value to write to the MMIO register
*
* Provides an MMIO register accessor for the atom interpreter (r4xx+).
*/
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
703,15 → 534,6
WREG32(reg*4, val);
}
 
/**
* cail_reg_read - read MMIO register
*
* @info: atom card_info pointer
* @reg: MMIO register offset
*
* Provides an MMIO register accessor for the atom interpreter (r4xx+).
* Returns the value of the MMIO register.
*/
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
721,15 → 543,6
return r;
}
 
/**
* cail_ioreg_write - write IO register
*
* @info: atom card_info pointer
* @reg: IO register offset
* @val: value to write to the IO register
*
* Provides an IO register accessor for the atom interpreter (r4xx+).
*/
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
737,15 → 550,6
WREG32_IO(reg*4, val);
}
 
/**
* cail_ioreg_read - read IO register
*
* @info: atom card_info pointer
* @reg: IO register offset
*
* Provides an IO register accessor for the atom interpreter (r4xx+).
* Returns the value of the IO register.
*/
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
755,16 → 559,6
return r;
}
 
/**
* radeon_atombios_init - init the driver info and callbacks for atombios
*
* @rdev: radeon_device pointer
*
* Initializes the driver info and register access callbacks for the
* ATOM interpreter (r4xx+).
* Returns 0 on success, -ENOMEM on failure.
* Called at driver startup.
*/
int radeon_atombios_init(struct radeon_device *rdev)
{
struct card_info *atom_card_info =
798,15 → 592,6
return 0;
}
 
/**
* radeon_atombios_fini - free the driver info and callbacks for atombios
*
* @rdev: radeon_device pointer
*
* Frees the driver info and register access callbacks for the ATOM
* interpreter (r4xx+).
* Called at driver shutdown.
*/
void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
816,22 → 601,6
kfree(rdev->mode_info.atom_card_info);
}
 
/* COMBIOS */
/*
* COMBIOS is the bios format prior to ATOM. It provides
* command tables similar to ATOM, but doesn't have a unified
* parser. See radeon_combios.c
*/
 
/**
* radeon_combios_init - init the driver info for combios
*
* @rdev: radeon_device pointer
*
* Initializes the driver info for combios (r1xx-r3xx).
* Returns 0 on success.
* Called at driver startup.
*/
int radeon_combios_init(struct radeon_device *rdev)
{
radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
838,28 → 607,11
return 0;
}
 
/**
* radeon_combios_fini - free the driver info for combios
*
* @rdev: radeon_device pointer
*
* Frees the driver info for combios (r1xx-r3xx).
* Called at driver shutdown.
*/
void radeon_combios_fini(struct radeon_device *rdev)
{
}
 
/* if we get transitioned to only one device, take VGA back */
/**
* radeon_vga_set_decode - enable/disable vga decode
*
* @cookie: radeon_device pointer
* @state: enable/disable vga decode
*
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
struct radeon_device *rdev = cookie;
871,49 → 623,55
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
 
/**
* radeon_check_pot_argument - check that argument is a power of two
*
* @arg: value to check
*
* Validates that a certain argument is a power of two (all asics).
* Returns true if argument is valid.
*/
static bool radeon_check_pot_argument(int arg)
void radeon_check_arguments(struct radeon_device *rdev)
{
return (arg & (arg - 1)) == 0;
}
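
/* The bit trick above in numbers: a power of two has exactly one set bit,
* so clearing the lowest set bit with (arg & (arg - 1)) must yield zero.
* 64 & 63 == 0b1000000 & 0b0111111 == 0 (power of two), while
* 96 & 95 == 0b1100000 & 0b1011111 == 0b1000000 != 0 (not a power of two).
* Zero also passes the test, which the callers rely on ("no vram limit").
*/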
 
/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
*
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
static void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
if (!radeon_check_pot_argument(radeon_vram_limit)) {
switch (radeon_vram_limit) {
case 0:
case 4:
case 8:
case 16:
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
case 2048:
case 4096:
break;
default:
dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
radeon_vram_limit);
radeon_vram_limit = 0;
break;
}
 
radeon_vram_limit = radeon_vram_limit << 20;
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
switch (radeon_gart_size) {
case 4:
case 8:
case 16:
dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
radeon_gart_size);
radeon_gart_size = 512;
 
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
break;
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
case 2048:
case 4096:
break;
default:
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
radeon_gart_size = 512;
break;
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
 
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
/* AGP mode can only be -1, 1, 2, 4, 8 */
switch (radeon_agpmode) {
case -1:
947,38 → 705,25
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->gpu_lockup = false;
rdev->accel_working = false;
/* set up ring ids */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
 
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device);
 
/* mutex initializations are all done here so we
* can call these functions later without locking issues */
mutex_init(&rdev->ring_lock);
mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
atomic_set(&rdev->ih.lock, 0);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
r = radeon_gem_init(rdev);
if (r)
return r;
/* initialize vm here */
mutex_init(&rdev->vm_manager.lock);
/* Adjust VM size here.
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
mutex_init(&rdev->vram_mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
 
/* Set asic functions */
r = radeon_asic_init(rdev);
1000,15 → 745,14
 
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
* IGP - can handle 40-bits (in theory)
* AGP - generally dma32 is safest
* PCI - dma32 for legacy pci gart, 40 bits on newer asics
* PCI - only dma32
*/
rdev->need_dma32 = false;
if (rdev->flags & RADEON_IS_AGP)
rdev->need_dma32 = true;
if ((rdev->flags & RADEON_IS_PCI) &&
(rdev->family <= CHIP_RS740))
if (rdev->flags & RADEON_IS_PCI)
rdev->need_dma32 = true;
 
dma_bits = rdev->need_dma32 ? 32 : 40;
1015,7 → 759,6
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
dma_bits = 32;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
 
1023,7 → 766,10
/* TODO: block userspace mapping of io register */
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
 
rdev->rmmio = (void*)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
PG_SW+PG_NOCACHE);
 
if (rdev->rmmio == NULL) {
return -ENOMEM;
}
1030,18 → 776,6
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
 
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
break;
}
}
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
 
 
r = radeon_init(rdev);
if (r)
return r;
1060,94 → 794,13
// if (radeon_testing) {
// radeon_test_moves(rdev);
// }
// if ((radeon_testing & 2)) {
// radeon_test_syncing(rdev);
// }
if (radeon_benchmarking) {
radeon_benchmark(rdev, radeon_benchmarking);
radeon_benchmark(rdev);
}
return 0;
}
 
/**
* radeon_gpu_reset - reset the asic
*
* @rdev: radeon device pointer
*
* Attempts to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
*/
int radeon_gpu_reset(struct radeon_device *rdev)
{
unsigned ring_sizes[RADEON_NUM_RINGS];
uint32_t *ring_data[RADEON_NUM_RINGS];
 
bool saved = false;
 
int i, r;
int resched;
 
// down_write(&rdev->exclusive_lock);
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
// resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
&ring_data[i]);
if (ring_sizes[i]) {
saved = true;
dev_info(rdev->dev, "Saved %d dwords of commands "
"on ring %d.\n", ring_sizes[i], i);
}
}
 
retry:
r = radeon_asic_reset(rdev);
if (!r) {
dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
radeon_resume(rdev);
}
 
radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);
 
if (!r) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
radeon_ring_restore(rdev, &rdev->ring[i],
ring_sizes[i], ring_data[i]);
ring_sizes[i] = 0;
ring_data[i] = NULL;
}
 
r = radeon_ib_ring_tests(rdev);
if (r) {
dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
if (saved) {
saved = false;
radeon_suspend(rdev);
goto retry;
}
}
} else {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
kfree(ring_data[i]);
}
}
 
// ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
}
 
// up_write(&rdev->exclusive_lock);
return r;
}
 
 
 
/*
* Driver load/unload
*/
1248,6 → 901,15
init_display(dev->dev_private, &usermode);
 
 
uint32_t route0 = PciRead32(0, 31<<3, 0x60);
 
uint32_t route1 = PciRead32(0, 31<<3, 0x68);
 
uint8_t elcr0 = in8(0x4D0);
uint8_t elcr1 = in8(0x4D1);
 
dbgprintf("pci route: %x %x elcr: %x %x\n", route0, route1, elcr0, elcr1);
 
LEAVE();
 
return 0;
1360,6 → 1022,7
dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
inp, io->inp_size, io->out_size );
check_output(4);
check_input(*outp * sizeof(videomode_t));
if( radeon_modeset)
retval = get_modes((videomode_t*)inp, outp);
break;
1373,12 → 1036,12
break;
 
case SRV_CREATE_VIDEO:
// retval = r600_create_video(inp[0], inp[1], outp);
retval = r600_create_video(inp[0], inp[1], outp);
break;
 
case SRV_BLIT_VIDEO:
// r600_video_blit( ((uint64_t*)inp)[0], inp[2], inp[3],
// inp[4], inp[5], inp[6]);
r600_video_blit( ((uint64_t*)inp)[0], inp[2], inp[3],
inp[4], inp[5], inp[6]);
 
retval = 0;
break;
1386,7 → 1049,7
case SRV_CREATE_BITMAP:
check_input(8);
check_output(4);
// retval = create_bitmap(outp, inp[0], inp[1]);
retval = create_bitmap(outp, inp[0], inp[1]);
break;
 
};
1401,7 → 1064,7
{
struct radeon_device *rdev = NULL;
 
const struct pci_device_id *ent;
struct pci_device_id *ent;
 
int err;
u32_t retval = 0;
1425,7 → 1088,7
return 0;
};
}
dbgprintf("Radeon RC12 preview 1 cmdline %s\n", cmdline);
dbgprintf("Radeon RC11 cmdline %s\n", cmdline);
 
enum_pci_devices();
 
1458,26 → 1121,3
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{};
 
 
#define PCI_CLASS_REVISION 0x08
#define PCI_CLASS_DISPLAY_VGA 0x0300
 
int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
{
u16_t vendor, device;
u32_t class;
int ret = 0;
 
vendor = id & 0xffff;
device = (id >> 16) & 0xffff;
 
if(vendor == 0x1002)
{
class = PciRead32(busnr, devfn, PCI_CLASS_REVISION);
class >>= 16;
 
if( class == PCI_CLASS_DISPLAY_VGA)
ret = 1;
}
return ret;
}
/drivers/video/drm/radeon/rdisplay.c
33,7 → 33,7
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
 
r = radeon_bo_create(rdev, CURSOR_WIDTH*CURSOR_HEIGHT*4,
PAGE_SIZE, false, RADEON_GEM_DOMAIN_VRAM, NULL, &cursor->robj);
PAGE_SIZE, false, RADEON_GEM_DOMAIN_VRAM, &cursor->robj);
 
if (unlikely(r != 0))
return r;
294,34 → 294,3
}
 
 
/* 23 bits of float fractional data */
#define I2F_FRAC_BITS 23
#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
 
/*
* Converts unsigned integer into 32-bit IEEE floating point representation.
* Will be exact from 0 to 2^24. Above that, we round towards zero
* as the fractional bits will not fit in a float. (It would be better to
* round towards even as the fpu does, but that is slower.)
*/
__pure uint32_t int2float(uint32_t x)
{
uint32_t msb, exponent, fraction;
 
/* Zero is special */
if (!x) return 0;
 
/* Get location of the most significant bit */
msb = __fls(x);
 
/*
* Use a rotate instead of a shift because that works both leftwards
* and rightwards due to the mod(32) behaviour. This means we don't
* need to check to see if we are above 2^24 or not.
*/
fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
exponent = (127 + msb) << I2F_FRAC_BITS;
 
return fraction + exponent;
}
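
/* A hedged, hosted-C sanity check for int2float() above (exact up to 2^24;
* larger inputs are truncated toward zero as described in the comment):
*/
#ifdef INT2FLOAT_SELFTEST
#include <assert.h>
#include <string.h>

static void int2float_selftest(void)
{
	static const uint32_t vals[] = { 0, 1, 2, 3, 255, 1u << 24 };
	unsigned i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		uint32_t bits = int2float(vals[i]);
		float f = (float)vals[i], g;

		memcpy(&g, &bits, sizeof(g));	/* reinterpret the bits */
		assert(f == g);
	}
}
#endif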
 
/drivers/video/drm/radeon/rv770.c
28,10 → 28,10
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"
47,12 → 47,12
/*
* GART
*/
static int rv770_pcie_gart_enable(struct radeon_device *rdev)
int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
 
if (rdev->gart.robj == NULL) {
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
74,8 → 74,6
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
if (rdev->family == CHIP_RV740)
WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
91,17 → 89,14
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
 
r600_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
static void rv770_pcie_gart_disable(struct radeon_device *rdev)
void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int i;
int i, r;
 
/* Disable all tables */
for (i = 0; i < 7; i++)
121,10 → 116,17
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
radeon_gart_table_vram_unpin(rdev);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
}
 
static void rv770_pcie_gart_fini(struct radeon_device *rdev)
void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rv770_pcie_gart_disable(rdev);
132,7 → 134,7
}
 
 
static void rv770_agp_enable(struct radeon_device *rdev)
void rv770_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
int i;
205,7 → 207,7
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
283,6 → 285,229
/*
* Core functions
*/
static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask;
u32 enabled_backends_count;
u32 cur_pipe;
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
bool force_no_swizzle;
 
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > R7XX_MAX_BACKENDS)
num_backends = R7XX_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
 
enabled_backends_mask = 0;
enabled_backends_count = 0;
for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
 
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
 
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
 
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}
 
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
swizzle_pipe[0] = 0;
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 3:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
}
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 3;
swizzle_pipe[3] = 1;
}
break;
case 5:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
}
break;
case 7:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 7;
swizzle_pipe[7] = 5;
}
break;
}
 
cur_backend = 0;
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
 
backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
 
cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
}
 
return backend_map;
}
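
/* Worked example for the mapping above (RV770-class, so swizzling is on):
* num_tile_pipes = 4 gives swizzle_pipe = {0, 2, 3, 1}; with only backends
* 0 and 1 enabled the loop assigns pipe0->RB0, pipe1->RB1, pipe2->RB0,
* pipe3->RB1, each packed as a 2-bit field at bit (swizzle_pipe[pipe] * 2),
* so backend_map == 0x14.
*/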
 
static void rv770_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer, mc_shared_chremap, tmp;
bool force_no_swizzle;
 
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}
 
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
case 2:
case 3:
if (force_no_swizzle)
mc_shared_chremap = 0x00fac688;
else
mc_shared_chremap = 0x00bbc298;
break;
}
 
if (rdev->family == CHIP_RV740)
tcp_chan_steer = 0x00ef2a60;
else
tcp_chan_steer = 0x00fac688;
 
/* RV770 CE has special chremap setup */
if (rdev->pdev->device == 0x944e) {
tcp_chan_steer = 0x00b08b08;
mc_shared_chremap = 0x00b08b08;
}
 
WREG32(TCP_CHAN_STEER, tcp_chan_steer);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
 
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
298,17 → 523,14
u32 sq_thread_resource_mgmt;
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
u32 db_debug4, tmp;
u32 inactive_pipes, shader_pipe_config;
u32 disabled_rb_mask;
unsigned active_number;
u32 db_debug4;
 
/* setup chip specs */
rdev->config.rv770.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_RV770:
rdev->config.rv770.max_pipes = 4;
419,70 → 641,33
/* setup tiling, simd, pipe config */
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
if (!(inactive_pipes & tmp)) {
active_number++;
}
tmp <<= 1;
}
if (active_number == 1) {
WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
} else {
WREG32(SPI_CONFIG_CNTL, 0);
}
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
if (tmp < rdev->config.rv770.max_backends) {
rdev->config.rv770.max_backends = tmp;
}
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
if (tmp < rdev->config.rv770.max_pipes) {
rdev->config.rv770.max_pipes = tmp;
}
tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
if (tmp < rdev->config.rv770.max_simds) {
rdev->config.rv770.max_simds = tmp;
}
 
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
default:
gb_tiling_config = PIPE_TILING(0);
gb_tiling_config |= PIPE_TILING(0);
break;
case 2:
gb_tiling_config = PIPE_TILING(1);
gb_tiling_config |= PIPE_TILING(1);
break;
case 4:
gb_tiling_config = PIPE_TILING(2);
gb_tiling_config |= PIPE_TILING(2);
break;
case 8:
gb_tiling_config = PIPE_TILING(3);
gb_tiling_config |= PIPE_TILING(3);
break;
}
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
R7XX_MAX_BACKENDS, disabled_rb_mask);
gb_tiling_config |= tmp << 16;
rdev->config.rv770.backend_map = tmp;
 
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else {
if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
gb_tiling_config |= BANK_TILING(1);
else
gb_tiling_config |= BANK_TILING(0);
}
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
rdev->config.rv770.tiling_group_size = 512;
else
rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
494,19 → 679,49
}
 
gb_tiling_config |= BANK_SWAPS(1);
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
 
if (rdev->family == CHIP_RV740)
backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(rdev,
rdev->config.rv770.max_tile_pipes,
(R7XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
 
rdev->config.rv770.tile_config = gb_tiling_config;
rdev->config.rv770.backend_map = backend_map;
gb_tiling_config |= BACKEND_MAP(backend_map);
 
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
rv770_program_channel_remap(rdev);
 
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
 
 
num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
num_qd_pipes =
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
 
534,9 → 749,6
ACK_FLUSH_CTL(3) |
SYNC_FLUSH_CTL));
 
if (rdev->family != CHIP_RV770)
WREG32(SMX_SAR_CTL0, 0x00003f3f);
 
db_debug3 = RREG32(DB_DEBUG3);
db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
switch (rdev->family) {
570,6 → 782,8
 
WREG32(VGT_NUM_INSTANCES, 1);
 
WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
 
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
 
WREG32(CP_PERFMON_CNTL, 0);
713,9 → 927,57
 
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
WREG32(VC_ENHANCE, 0);
 
}
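
The clamping logic above leans on r600_count_pipe_bits() to turn the hardware's disable masks into counts of usable units. For reference, a minimal sketch of that helper (the in-tree version of this era is just a 32-bit population count):

static u32 count_pipe_bits_sketch(u32 val)
{
	u32 i, ret = 0;

	/* count the set bits, one per disabled unit in the mask */
	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}

So R7XX_MAX_BACKENDS minus the number of bits set in (CC_RB_BACKEND_DISABLE >> 16) is the number of render backends actually available, and likewise for pipes and SIMDs.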
 
static int rv770_vram_scratch_init(struct radeon_device *rdev)
{
int r;
u64 gpu_addr;
 
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->vram_scratch.robj);
if (r) {
return r;
}
}
 
r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->vram_scratch.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->vram_scratch.robj);
return r;
}
r = radeon_bo_kmap(rdev->vram_scratch.robj,
(void **)&rdev->vram_scratch.ptr);
if (r)
radeon_bo_unpin(rdev->vram_scratch.robj);
radeon_bo_unreserve(rdev->vram_scratch.robj);
 
return r;
}
 
static void rv770_vram_scratch_fini(struct radeon_device *rdev)
{
int r;
 
if (rdev->vram_scratch.robj == NULL) {
return;
}
r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->vram_scratch.robj);
radeon_bo_unpin(rdev->vram_scratch.robj);
radeon_bo_unreserve(rdev->vram_scratch.robj);
}
radeon_bo_unref(&rdev->vram_scratch.robj);
}
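
Taken together, the init/fini pair above is the standard radeon_bo lifecycle: create, then reserve -> pin -> kmap, torn down in exactly the reverse order. A condensed sketch of the mapping half, usable for any VRAM-backed scratch object (helper name hypothetical):

static int scratch_map_sketch(struct radeon_device *rdev,
			      struct radeon_bo *bo, void **cpu_ptr)
{
	u64 gpu_addr;
	int r;

	r = radeon_bo_reserve(bo, false);	/* lock the bo */
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (!r) {
		r = radeon_bo_kmap(bo, cpu_ptr);	/* CPU-visible mapping */
		if (r)
			radeon_bo_unpin(bo);	/* roll back the pin on failure */
	}
	radeon_bo_unreserve(bo);	/* unlock whether or not we succeeded */
	return r;
}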
 
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
728,7 → 990,7
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
size_af = 0xFFFFFFFF - mc->gtt_end;
size_af = 0xFFFFFFFF - mc->gtt_end + 1;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
742,7 → 1004,7
mc->real_vram_size = size_af;
mc->mc_vram_size = size_af;
}
mc->vram_start = mc->gtt_end + 1;
mc->vram_start = mc->gtt_end;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
755,7 → 1017,7
}
}
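
A worked example of the placement arithmetic above, with assumed values: gtt_start = 0x80000000 and gtt_end = 0xBFFFFFFF (a 1 GiB GTT). Then size_bf = 2 GiB of room below the GTT, and the room above it is addresses 0xC0000000..0xFFFFFFFF, i.e. 0xFFFFFFFF - gtt_end = 1 GiB. A 1 GiB VRAM aperture therefore lands below the GTT; a larger one is clamped and placed above it. The paired +1s the two revisions disagree on are the same inclusive/exclusive question: with an inclusive gtt_end, the byte count above is 0xFFFFFFFF - gtt_end and VRAM must start at gtt_end + 1, otherwise vram_start would overlap the last byte of the GTT range.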
 
static int rv770_mc_init(struct radeon_device *rdev)
int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
802,7 → 1064,6
 
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* enable pcie gen2 link */
816,10 → 1077,6
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
828,21 → 1085,23
if (r)
return r;
}
 
r = rv770_vram_scratch_init(rdev);
if (r)
return r;
rv770_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
// r600_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
// r = r600_video_init(rdev);
// if (r) {
r = r600_video_init(rdev);
if (r) {
// r600_video_fini(rdev);
// rdev->asic->copy = NULL;
// dev_warn(rdev->dev, "failed video blitter (%d) falling back to memcpy\n", r);
// }
dev_warn(rdev->dev, "failed video blitter (%d) falling back to memcpy\n", r);
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
858,9 → 1117,7
}
r600_irq_set(rdev);
 
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
r = rv770_cp_load_microcode(rdev);
870,13 → 1127,6
if (r)
return r;
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
 
return 0;
}
 
896,6 → 1146,10
{
int r;
 
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
946,8 → 1200,8
if (r)
return r;
 
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
963,6 → 1217,19
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
} else {
r = r600_ib_test(rdev);
if (r) {
dev_err(rdev->dev, "IB test failed (%d).\n", r);
rdev->accel_working = false;
}
}
}
 
return 0;
}
971,8 → 1238,6
{
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
u32 mask;
int ret;
 
if (radeon_pcie_gen2 == 0)
return;
987,15 → 1252,6
if (ASIC_IS_X2(rdev))
return;
 
ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
if (ret != 0)
return;
 
if (!(mask & DRM_PCIE_SPEED_50))
return;
 
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
/* advertise upconfig capability */
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
/drivers/video/drm/radeon/atom.c
277,12 → 277,7
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
val = 0;
} else
val = gctx->scratch[(gctx->fb_base / 4) + idx];
val = gctx->scratch[((gctx->fb_base + idx) / 4)];
if (print)
DEBUG("FB[0x%02X]", idx);
break;
536,11 → 531,7
case ATOM_ARG_FB:
idx = U8(*ptr);
(*ptr)++;
if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
} else
gctx->scratch[(gctx->fb_base / 4) + idx] = val;
gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
DEBUG("FB[0x%02X]", idx);
break;
case ATOM_ARG_PLL:
723,26 → 714,9
if (arg != ATOM_COND_ALWAYS)
SDEBUG(" taken: %s\n", execute ? "yes" : "no");
SDEBUG(" target: 0x%04X\n", target);
if (execute) {
if (ctx->last_jump == (ctx->start + target)) {
cjiffies = GetTimerTicks();
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
if ((jiffies_to_msecs(cjiffies) > 5000)) {
DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
ctx->abort = true;
}
} else {
/* jiffies wrapped around; just wait a little longer */
ctx->last_jump_jiffies = GetTimerTicks();
}
} else {
ctx->last_jump = ctx->start + target;
ctx->last_jump_jiffies = GetTimerTicks();
}
if (execute)
*ptr = ctx->start + target;
}
}
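
Condensed, the loop guard introduced above is a same-target watchdog: remember where the interpreter last jumped and when, and abort if it keeps landing on that target for more than five seconds. The heart of it (sketch, using the same GetTimerTicks()/jiffies helpers as the code above):

if (ctx->last_jump == ctx->start + target) {
	cjiffies = GetTimerTicks();
	if (time_after(cjiffies, ctx->last_jump_jiffies) &&
	    jiffies_to_msecs(cjiffies - ctx->last_jump_jiffies) > 5000)
		ctx->abort = true;	/* unwinds atom_execute_table() */
} else {
	/* new target: re-arm the watchdog */
	ctx->last_jump = ctx->start + target;
	ctx->last_jump_jiffies = GetTimerTicks();
}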
 
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
1304,11 → 1278,8
 
int atom_asic_init(struct atom_context *ctx)
{
struct radeon_device *rdev = ctx->card->dev->dev_private;
int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
uint32_t ps[16];
int ret;
 
memset(ps, 0, 64);
 
ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1318,18 → 1289,8
 
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
if (ret)
return ret;
 
memset(ps, 0, 64);
 
if (rdev->family < CHIP_R600) {
if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
}
return ret;
}
 
void atom_destroy(struct atom_context *ctx)
{
1392,7 → 1353,6
 
usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
/* allocate some scratch memory */
1399,6 → 1359,5
ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
if (!ctx->scratch)
return -ENOMEM;
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
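
Distilled, the fb_base guards added in the two ATOM_ARG_FB cases above, together with scratch_size_bytes now being set only after a successful allocation, make every scratch access bounds-checked. A sketch of the read side (helper name hypothetical):

static uint32_t atom_fb_read_sketch(struct atom_context *gctx, uint8_t idx)
{
	/* reject dword indices that fall past the allocated scratch area */
	if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
		DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
			  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
		return 0;
	}
	return gctx->scratch[(gctx->fb_base / 4) + idx];
}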
/drivers/video/drm/radeon/atombios_crtc.c
83,20 → 83,26
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
ENABLE_SCALER_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
 
/* fixme - fill in enc_priv for atom dac */
enum radeon_tv_std tv_std = TV_STD_NTSC;
bool is_tv = false, is_cv = false;
struct drm_encoder *encoder;
 
if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
return;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
/* find tv std */
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
tv_std = tv_dac->tv_std;
is_tv = true;
}
}
}
 
memset(&args, 0, sizeof(args));
 
225,22 → 231,6
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
 
memset(&args, 0, sizeof(args));
 
args.ucDispPipeId = radeon_crtc->crtc_id;
args.ucEnable = state;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
252,10 → 242,8
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
267,12 → 255,10
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_ENABLE);
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
369,12 → 355,15
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
static void atombios_disable_ss(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
u32 ss_cntl;
 
if (ASIC_IS_DCE4(rdev)) {
switch (pll_id) {
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
390,7 → 379,7
return;
}
} else if (ASIC_IS_AVIVO(rdev)) {
switch (pll_id) {
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
ss_cntl &= ~1;
417,31 → 406,16
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};
 
static void atombios_crtc_program_ss(struct radeon_device *rdev,
static void atombios_crtc_program_ss(struct drm_crtc *crtc,
int enable,
int pll_id,
int crtc_id,
struct radeon_atom_ss *ss)
{
unsigned i;
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
 
if (!enable) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
rdev->mode_info.crtcs[i]->enabled &&
i != crtc_id &&
pll_id == rdev->mode_info.crtcs[i]->pll_id) {
/* another crtc is using this pll; don't turn
* off spread spectrum, as that might turn off
* the display on the active crtc
*/
return;
}
}
}
 
memset(&args, 0, sizeof(args));
 
if (ASIC_IS_DCE5(rdev)) {
450,20 → 424,24
switch (pll_id) {
case ATOM_PPLL1:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
}
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v3.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
471,20 → 449,24
switch (pll_id) {
case ATOM_PPLL1:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
}
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v2.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
497,7 → 479,7
} else if (ASIC_IS_AVIVO(rdev)) {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
atombios_disable_ss(rdev, pll_id);
atombios_disable_ss(crtc);
return;
}
args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
509,7 → 491,7
} else {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
atombios_disable_ss(rdev, pll_id);
atombios_disable_ss(crtc);
return;
}
args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
527,51 → 509,56
};
 
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode)
struct drm_display_mode *mode,
struct radeon_pll *pll,
bool ss_enabled,
struct radeon_atom_ss *ss)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = radeon_crtc->encoder;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
struct drm_connector *connector = NULL;
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
int encoder_mode = 0;
u32 dp_clock = mode->clock;
int bpc = radeon_get_monitor_bpc(connector);
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
int bpc = 8;
 
/* reset the pll flags */
radeon_crtc->pll_flags = 0;
pll->flags = 0;
 
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
RADEON_PLL_PREFER_CLOSEST_LOWER);
 
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
} else {
radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
pll->flags |= RADEON_PLL_LEGACY;
 
if (mode->clock > 200000) /* range limits??? */
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
if (connector)
bpc = connector->display_info.bpc;
encoder_mode = atombios_get_encoder_mode(encoder);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
radeon_encoder_is_dp_bridge(encoder)) {
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
583,12 → 570,12
 
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (radeon_crtc->ss_enabled) {
if (radeon_crtc->ss.refdiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
if (ss_enabled) {
if (ss->refdiv) {
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
if (ASIC_IS_AVIVO(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
}
598,15 → 585,18
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
pll->flags |= RADEON_PLL_IS_LCD;
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
pll->flags |= RADEON_PLL_USE_REF_DIV;
}
break;
}
}
 
/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
* accordingly based on the encoder/transmitter to work around
632,7 → 622,7
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
if (ss_enabled && ss->percentage)
args.v1.ucConfig |=
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
 
645,32 → 635,47
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
args.v3.sInput.ucEncodeMode = encoder_mode;
args.v3.sInput.ucDispPllConfig = 0;
if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
if (ss_enabled && ss->percentage)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_SS_ENABLE;
if (ENCODER_MODE_IS_DP(encoder_mode)) {
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) ||
radeon_encoder_is_dp_bridge(encoder)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
} else {
if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
/* deep color support */
args.v3.sInput.usPixelClock =
cpu_to_le16((mode->clock * bpc / 8) / 10);
}
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
if (is_duallink)
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
}
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
ENCODER_OBJECT_ID_NONE)
args.v3.sInput.ucExtTransmitterID =
radeon_encoder_get_dp_bridge_encoder_id(encoder);
else
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
}
}
if (radeon_encoder_is_dp_bridge(encoder)) {
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
} else
args.v3.sInput.ucExtTransmitterID = 0;
 
atom_execute_table(rdev->mode_info.atom_context,
677,14 → 682,14
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = args.v3.sOutput.ucRefDiv;
}
if (args.v3.sOutput.ucPostDiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_POST_DIV;
pll->post_div = args.v3.sOutput.ucPostDiv;
}
break;
default:
712,9 → 717,11
/* on DCE5, make sure the voltage is high enough to support the
* required disp clk.
*/
static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
u32 dispclk)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
u8 frev, crev;
int index;
union set_pixel_clock args;
742,11 → 749,6
* SetPixelClock provides the dividers
*/
args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
if (ASIC_IS_DCE61(rdev))
args.v6.ucPpll = ATOM_EXT_PLL1;
else if (ASIC_IS_DCE6(rdev))
args.v6.ucPpll = ATOM_PPLL0;
else
args.v6.ucPpll = ATOM_DCPLL;
break;
default:
819,10 → 821,7
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
args.v3.ucPpll = pll_id;
if (crtc_id == ATOM_CRTC2)
args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
else
args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
args.v3.ucMiscInfo = (pll_id << 2);
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
args.v3.ucTransmitterId = encoder_id;
892,84 → 891,103
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
u32 pll_clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
u32 adjusted_clock;
int encoder_mode = 0;
struct radeon_atom_ss ss;
bool ss_enabled = false;
int bpc = 8;
 
radeon_crtc->bpc = 8;
radeon_crtc->ss_enabled = false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
encoder_mode = atombios_get_encoder_mode(encoder);
break;
}
}
 
if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
if (!radeon_encoder)
return;
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
break;
case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
default:
pll = &rdev->clock.dcpll;
break;
}
 
if (radeon_encoder->active_device &
(ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
radeon_get_connector_for_encoder(radeon_crtc->encoder);
radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector =
to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
bpc = connector->display_info.bpc;
 
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
case ATOM_ENCODER_MODE_DP:
/* DP/eDP */
dp_clock = dig_connector->dp_clock / 10;
if (ASIC_IS_DCE4(rdev))
radeon_crtc->ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_DP,
dp_clock);
else {
if (dp_clock == 16200) {
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
ATOM_DP_SS_ID2);
if (!radeon_crtc->ss_enabled)
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
if (!ss_enabled)
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
ATOM_DP_SS_ID1);
} else
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
ATOM_DP_SS_ID1);
}
break;
case ATOM_ENCODER_MODE_LVDS:
if (ASIC_IS_DCE4(rdev))
radeon_crtc->ss_enabled =
radeon_atombios_get_asic_ss_info(rdev,
&radeon_crtc->ss,
ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
dig->lcd_ss_id,
mode->clock / 10);
else
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss,
dig->lcd_ss_id);
break;
case ATOM_ENCODER_MODE_DVI:
if (ASIC_IS_DCE4(rdev))
radeon_crtc->ss_enabled =
radeon_atombios_get_asic_ss_info(rdev,
&radeon_crtc->ss,
ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_TMDS,
mode->clock / 10);
break;
case ATOM_ENCODER_MODE_HDMI:
if (ASIC_IS_DCE4(rdev))
radeon_crtc->ss_enabled =
radeon_atombios_get_asic_ss_info(rdev,
&radeon_crtc->ss,
ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_HDMI,
mode->clock / 10);
break;
979,80 → 997,43
}
 
/* adjust pixel clock as needed */
radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
 
return true;
}
 
static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
u32 pll_clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
break;
case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
default:
pll = &rdev->clock.dcpll;
break;
}
 
/* update pll params */
pll->flags = radeon_crtc->pll_flags;
pll->reference_div = radeon_crtc->pll_reference_div;
pll->post_div = radeon_crtc->pll_post_div;
 
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
/* TV seems to prefer the legacy algo on some boards */
radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div, &ref_div, &post_div);
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
else if (ASIC_IS_AVIVO(rdev))
radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div, &ref_div, &post_div);
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
else
radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div, &ref_div, &post_div);
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
 
atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
radeon_crtc->crtc_id, &radeon_crtc->ss);
atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
 
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
ref_div, fb_div, frac_fb_div, post_div,
radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss);
 
if (radeon_crtc->ss_enabled) {
if (ss_enabled) {
/* calculate ss amount and step size */
if (ASIC_IS_DCE4(rdev)) {
u32 step_size;
u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
else
step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
step_size = (2 * amount * ref_div * (ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
radeon_crtc->ss.step = step_size;
ss.step = step_size;
}
 
atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
radeon_crtc->crtc_id, &radeon_crtc->ss);
atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
}
}
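
Note the guard at the top of atombios_crtc_program_ss()'s disable path above: spread spectrum is left alone if any other active crtc shares the PLL. The same test recurs in atombios_crtc_disable() further down, and reduces to (sketch, helper name hypothetical):

static bool pll_in_use_by_other_crtc(struct radeon_device *rdev,
				     int pll_id, int crtc_id)
{
	unsigned i;

	for (i = 0; i < rdev->num_crtc; i++) {
		struct radeon_crtc *c = rdev->mode_info.crtcs[i];
		/* another enabled crtc on the same PLL: hands off */
		if (c && c->enabled && i != crtc_id && c->pll_id == pll_id)
			return true;
	}
	return false;
}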
 
1069,7 → 1050,6
struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
unsigned bankw, bankh, mtaspect, tile_split;
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
1141,43 → 1121,11
return -EINVAL;
}
 
if (tiling_flags & RADEON_TILING_MACRO) {
if (rdev->family >= CHIP_TAHITI)
tmp = rdev->config.si.tile_config;
else if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
 
switch ((tmp & 0xf0) >> 4) {
case 0: /* 4 banks */
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
break;
case 1: /* 8 banks */
default:
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
break;
case 2: /* 16 banks */
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
break;
}
 
if (tiling_flags & RADEON_TILING_MACRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
 
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
} else if (tiling_flags & RADEON_TILING_MICRO)
else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
else if (rdev->family == CHIP_VERDE)
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
 
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(AVIVO_D1VGA_CONTROL, 0);
1219,12 → 1167,12
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
target_fb->height);
crtc->mode.vdisplay);
x &= ~3;
y &= ~1;
WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
1388,12 → 1336,12
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
target_fb->height);
crtc->mode.vdisplay);
x &= ~3;
y &= ~1;
WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
1481,273 → 1429,54
}
}
 
/**
* radeon_get_pll_use_mask - look up a mask of which pplls are in use
*
* @crtc: drm crtc
*
* Returns the mask of which PPLLs (Pixel PLLs) are in use.
*/
static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 pll_in_use = 0;
 
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
if (crtc == test_crtc)
continue;
 
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
pll_in_use |= (1 << test_radeon_crtc->pll_id);
}
return pll_in_use;
}
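
Downstream, this mask is consumed by taking the first PPLL whose bit is still clear; a minimal sketch of that consumption, mirroring the DCE4 branch of radeon_atom_pick_pll() below (helper name hypothetical):

static int pick_free_ppll_sketch(struct drm_crtc *crtc)
{
	u32 pll_in_use = radeon_get_pll_use_mask(crtc);

	if (!(pll_in_use & (1 << ATOM_PPLL1)))
		return ATOM_PPLL1;	/* first free pixel PLL */
	if (!(pll_in_use & (1 << ATOM_PPLL2)))
		return ATOM_PPLL2;
	DRM_ERROR("unable to allocate a PPLL\n");
	return ATOM_PPLL_INVALID;	/* caller then skips PLL programming */
}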
 
/**
* radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
*
* @crtc: drm crtc
*
* Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
* also in DP mode. For DP, a single PPLL can be used for all DP
* crtcs/encoders.
*/
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
 
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
if (crtc == test_crtc)
continue;
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
/* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
}
}
return ATOM_PPLL_INVALID;
}
 
/**
* radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
*
* @crtc: drm crtc
* @encoder: drm encoder
*
* Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
* be shared (i.e., same clock).
*/
static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock;
 
adjusted_clock = radeon_crtc->adjusted_clock;
 
if (adjusted_clock == 0)
return ATOM_PPLL_INVALID;
 
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
if (crtc == test_crtc)
continue;
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
/* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
}
/* for non-DP check the clock */
test_adjusted_clock = test_radeon_crtc->adjusted_clock;
if ((crtc->mode.clock == test_crtc->mode.clock) &&
(adjusted_clock == test_adjusted_clock) &&
(radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
(test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
return test_radeon_crtc->pll_id;
}
}
return ATOM_PPLL_INVALID;
}
 
/**
* radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
*
* @crtc: drm crtc
*
* Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
* a single PPLL can be used for all DP crtcs/encoders. For non-DP
* monitors a dedicated PPLL must be used. If a particular board has
* an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
* as there is no need to program the PLL itself. If we are not able to
* allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
* avoid messing up an existing monitor.
*
* Asic specific PLL information
*
* DCE 6.1
* - PPLL2 is only available to UNIPHYA (both DP and non-DP)
* - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
*
* DCE 6.0
* - PPLL0 is available to all UNIPHY (DP only)
* - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
*
* DCE 5.0
* - DCPLL is available to all UNIPHY (DP only)
* - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
*
* DCE 3.0/4.0/4.1
* - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
*
*/
static int radeon_atom_pick_pll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
u32 pll_in_use;
int pll;
struct drm_encoder *test_encoder;
struct drm_crtc *test_crtc;
uint32_t pll_in_use = 0;
 
if (ASIC_IS_DCE61(rdev)) {
struct radeon_encoder_atom_dig *dig =
radeon_encoder->enc_priv;
 
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
(dig->linkb == false))
/* UNIPHY A uses PPLL2 */
return ATOM_PPLL2;
else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
/* UNIPHY B/C/D/E/F */
if (rdev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
return ATOM_PPLL_INVALID;
else {
/* use the same PPLL for all DP monitors */
pll = radeon_get_shared_dp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
} else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
/* UNIPHY B/C/D/E/F */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL0)))
return ATOM_PPLL0;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
if (ASIC_IS_DCE4(rdev)) {
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:
* DCE4: PPLL or ext clock
* DCE5: PPLL, DCPLL, or ext clock
* DCE6: PPLL, PPLL0, or ext clock
* DCE5: DCPLL or ext clock
*
* Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
* PPLL/DCPLL programming and only program the DP DTO for the
* crtc virtual pixel clock.
*/
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
if (rdev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
else if (ASIC_IS_DCE6(rdev))
/* use PPLL0 for all DP */
return ATOM_PPLL0;
else if (ASIC_IS_DCE5(rdev))
/* use DCPLL for all DP */
return ATOM_DCPLL;
else {
/* use the same PPLL for all DP monitors */
pll = radeon_get_shared_dp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
} else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
if (ASIC_IS_AVIVO(rdev)) {
/* in DP mode, the DP ref clock can come from either PPLL
* depending on the asic:
* DCE3: PPLL1 or PPLL2
*/
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
/* use the same PPLL for all DP monitors */
pll = radeon_get_shared_dp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
} else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL1)))
 
/* otherwise, pick one of the plls */
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_test_crtc;
 
if (crtc == test_crtc)
continue;
 
radeon_test_crtc = to_radeon_crtc(test_crtc);
if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
(radeon_test_crtc->pll_id <= ATOM_PPLL2))
pll_in_use |= (1 << radeon_test_crtc->pll_id);
}
if (!(pll_in_use & 1))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
/* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
} else
return radeon_crtc->crtc_id;
}
}
}
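
For DP the allocator above collapses to a short decision ladder; a condensed sketch of just that branch (helper name hypothetical, conditions as in the kernel-doc comment above):

static int pick_dp_ppll_sketch(struct drm_crtc *crtc)
{
	struct radeon_device *rdev = crtc->dev->dev_private;
	int pll;

	if (rdev->clock.dp_extclk)
		return ATOM_PPLL_INVALID;	/* ext clock: skip PLL programming */
	if (ASIC_IS_DCE6(rdev))
		return ATOM_PPLL0;		/* all DP monitors share PPLL0 */
	if (ASIC_IS_DCE5(rdev))
		return ATOM_DCPLL;		/* all DP monitors share DCPLL */
	pll = radeon_get_shared_dp_ppll(crtc);	/* reuse another DP crtc's PPLL */
	if (pll != ATOM_PPLL_INVALID)
		return pll;
	return ATOM_PPLL_INVALID;	/* fall back to picking a free PPLL (see above) */
}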
 
void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
{
/* always set DCPLL */
if (ASIC_IS_DCE6(rdev))
atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
else if (ASIC_IS_DCE4(rdev)) {
struct radeon_atom_ss ss;
bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_DCPLL,
rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
/* XXX: DCE5, make sure voltage, dispclk is high enough */
atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
}
 
}
 
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
1756,14 → 1485,32
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
struct drm_encoder *encoder;
bool is_tvcv = false;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
/* find tv std */
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->active_device &
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
is_tvcv = true;
}
}
 
/* always set DCPLL */
if (ASIC_IS_DCE4(rdev)) {
struct radeon_atom_ss ss;
bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_DCPLL,
rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
/* XXX: DCE5, make sure voltage, dispclk is high enough */
atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
}
atombios_crtc_set_pll(crtc, adjusted_mode);
 
if (ASIC_IS_DCE4(rdev))
1786,37 → 1533,17
}
 
static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
struct radeon_device *rdev = dev->dev_private;
 
/* assign the encoder to the radeon crtc to avoid repeated lookups later */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_crtc->encoder = encoder;
radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
break;
}
}
if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
radeon_crtc->encoder = NULL;
radeon_crtc->connector = NULL;
return false;
}
/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);
 
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
return false;
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
/* if we can't get a PPLL for a non-DP encoder, fail */
if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
return false;
 
return true;
}
 
1823,15 → 1550,10
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
 
radeon_crtc->in_mode_set = true;
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
 
/* disable crtc pair power gating before programming */
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_DISABLE);
 
atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
1838,35 → 1560,17
 
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
atombios_lock_crtc(crtc, ATOM_DISABLE);
radeon_crtc->in_mode_set = false;
}
 
static void atombios_crtc_disable(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_atom_ss ss;
int i;
 
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
rdev->mode_info.crtcs[i]->enabled &&
i != radeon_crtc->crtc_id &&
radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
/* another crtc is using this pll; don't turn
* it off
*/
goto done;
}
}
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
case ATOM_PPLL2:
1874,20 → 1578,10
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
case ATOM_PPLL0:
/* disable the ppll */
if (ASIC_IS_DCE61(rdev))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
default:
break;
}
done:
radeon_crtc->pll_id = ATOM_PPLL_INVALID;
radeon_crtc->adjusted_clock = 0;
radeon_crtc->encoder = NULL;
radeon_crtc->connector = NULL;
radeon_crtc->pll_id = -1;
}
 
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
1936,9 → 1630,6
else
radeon_crtc->crtc_offset = 0;
}
radeon_crtc->pll_id = ATOM_PPLL_INVALID;
radeon_crtc->adjusted_clock = 0;
radeon_crtc->encoder = NULL;
radeon_crtc->connector = NULL;
radeon_crtc->pll_id = -1;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
/drivers/video/drm/radeon/atombios_dp.c
22,15 → 22,14
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
 
#include "atom.h"
#include "atom-bits.h"
#include <drm/drm_dp_helper.h>
#include "drm_dp_helper.h"
 
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
64,12 → 63,12
 
memset(&args, 0, sizeof(args));
 
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
memcpy(base, send, send_bytes);
 
args.v1.lpAuxRequest = 0 + 4;
args.v1.lpDataOut = 16 + 4;
args.v1.lpAuxRequest = 0;
args.v1.lpDataOut = 16;
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
116,7 → 115,6
u8 msg[20];
int msg_bytes = send_bytes + 4;
u8 ack;
unsigned retry;
 
if (send_bytes > 16)
return -1;
127,15 → 125,13
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
 
for (retry = 0; retry < 4; retry++) {
while (1) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return send_bytes;
break;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else
142,7 → 138,7
return -EIO;
}
 
return -EIO;
return send_bytes;
}
 
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
153,7 → 149,6
int msg_bytes = 4;
u8 ack;
int ret;
unsigned retry;
 
msg[0] = address;
msg[1] = address >> 8;
160,24 → 155,20
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
for (retry = 0; retry < 4; retry++) {
while (1) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0)
if (ret == 0)
return -EPROTO;
if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else if (ret == 0)
return -EPROTO;
else
return -EIO;
}
 
return -EIO;
}
 
static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
241,9 → 232,7
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
if (ret == -EBUSY)
continue;
else if (ret < 0) {
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
284,7 → 273,7
}
}
 
DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
DRM_ERROR("aux i2c too many retries, giving up\n");
return -EREMOTEIO;
}
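
Both the native AUX helpers above and this i2c-over-AUX path interpret the same reply field; the dispatch they all share looks like this (sketch, helper name hypothetical; reply codes as used in the loops above):

static int aux_reply_action_sketch(u8 ack)
{
	switch (ack & AUX_NATIVE_REPLY_MASK) {
	case AUX_NATIVE_REPLY_ACK:
		return 0;	/* success: any returned payload is valid */
	case AUX_NATIVE_REPLY_DEFER:
		udelay(400);	/* sink busy: back off... */
		return -EBUSY;	/* ...and let the bounded retry loop try again */
	default:
		return -EIO;	/* NACK or malformed reply: give up */
	}
}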
 
461,7 → 450,7
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
int max_link_rate = dp_get_max_link_rate(dpcd);
int max_lane_num = dp_get_max_lane_number(dpcd);
int lane_num;
480,11 → 469,10
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
int lane_num, max_pix_clock;
 
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
ENCODER_OBJECT_ID_NUTMEG)
if (radeon_connector_encoder_is_dp_bridge(connector))
return 270000;
 
lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
531,23 → 519,6
dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}
 
static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 buf[3];
 
if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
 
if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
}
 
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
561,9 → 532,6
for (i = 0; i < 8; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
 
radeon_dp_probe_oui(radeon_connector);
 
return true;
}
dig_connector->dpcd[0] = 0;
570,41 → 538,26
return false;
}
 
int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
 
if (!ASIC_IS_DCE4(rdev))
return panel_mode;
return;
 
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
(dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
if (radeon_connector_encoder_is_dp_bridge(connector))
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
else
panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
/* eDP */
tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
}
 
return panel_mode;
atombios_dig_encoder_setup(encoder,
ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
panel_mode);
}
 
void radeon_dp_set_link_config(struct drm_connector *connector,
const struct drm_display_mode *mode)
struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
650,25 → 603,16
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE, 100);
if (ret <= 0) {
DRM_ERROR("displayport link status failed\n");
return false;
}
 
DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
return true;
}
 
bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
{
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
if (!radeon_dp_get_link_status(radeon_connector, link_status))
return false;
if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
return true;
}
 
struct radeon_dp_link_train_info {
struct radeon_device *rdev;
struct drm_encoder *encoder;
735,8 → 679,6
 
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u8 tmp;
 
/* power up the sink */
752,15 → 694,11
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, 0);
 
if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
(dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
}
radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
 
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
if (dp_info->dpcd[0] >= 0x11)
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
826,10 → 764,8
else
mdelay(dp_info->rd_interval * 4);
 
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
break;
}
 
if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
891,10 → 827,8
else
mdelay(dp_info->rd_interval * 4);
 
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
break;
}
 
if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
/drivers/video/drm/radeon/evergreend.h
37,15 → 37,6
#define EVERGREEN_MAX_PIPES_MASK 0xFF
#define EVERGREEN_MAX_LDS_NUM 0xFFFF
 
#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
 
/* Registers */
 
#define RCU_IND_INDEX 0x100
63,7 → 54,6
#define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0)
#define NUM_PIPES_MASK 0x0000000f
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
#define NUM_SHADER_ENGINES(x) ((x) << 12)
87,15 → 77,6
 
#define CONFIG_MEMSIZE 0x5428
 
#define BIF_FB_EN 0x5490
#define FB_READ_EN (1 << 0)
#define FB_WRITE_EN (1 << 1)
 
#define CP_COHER_BASE 0x85F8
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
#define CP_BUSY_STAT 0x867C
#define CP_STAT 0x8680
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
127,230 → 108,9
#define CP_RB_WPTR_ADDR_HI 0xC11C
#define CP_RB_WPTR_DELAY 0x8704
#define CP_SEM_WAIT_TIMER 0x85BC
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_DEBUG 0xC1FC
 
/* Audio clocks */
#define DCCG_AUDIO_DTO_SOURCE 0x05ac
# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
 
#define DCCG_AUDIO_DTO0_PHASE 0x05b0
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO0_LOAD 0x05b8
#define DCCG_AUDIO_DTO0_CNTL 0x05bc
 
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
#define DCCG_AUDIO_DTO1_LOAD 0x05c8
#define DCCG_AUDIO_DTO1_CNTL 0x05cc
 
/* DCE 4.0 AFMT */
#define HDMI_CONTROL 0x7030
# define HDMI_KEEPOUT_MODE (1 << 0)
# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
# define HDMI_ERROR_ACK (1 << 8)
# define HDMI_ERROR_MASK (1 << 9)
# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28)
# define HDMI_24BIT_DEEP_COLOR 0
# define HDMI_30BIT_DEEP_COLOR 1
# define HDMI_36BIT_DEEP_COLOR 2
#define HDMI_STATUS 0x7034
# define HDMI_ACTIVE_AVMUTE (1 << 0)
# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
# define HDMI_VBI_PACKET_ERROR (1 << 20)
#define HDMI_AUDIO_PACKET_CONTROL 0x7038
# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
#define HDMI_ACR_PACKET_CONTROL 0x703c
# define HDMI_ACR_SEND (1 << 0)
# define HDMI_ACR_CONT (1 << 1)
# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
# define HDMI_ACR_HW 0
# define HDMI_ACR_32 1
# define HDMI_ACR_44 2
# define HDMI_ACR_48 3
# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI_ACR_AUTO_SEND (1 << 12)
# define HDMI_ACR_N_MULTIPLE(x) (((x) & 7) << 16)
# define HDMI_ACR_X1 1
# define HDMI_ACR_X2 2
# define HDMI_ACR_X4 4
# define HDMI_ACR_AUDIO_PRIORITY (1 << 31)
#define HDMI_VBI_PACKET_CONTROL 0x7040
# define HDMI_NULL_SEND (1 << 0)
# define HDMI_GC_SEND (1 << 4)
# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
#define HDMI_INFOFRAME_CONTROL0 0x7044
# define HDMI_AVI_INFO_SEND (1 << 0)
# define HDMI_AVI_INFO_CONT (1 << 1)
# define HDMI_AUDIO_INFO_SEND (1 << 4)
# define HDMI_AUDIO_INFO_CONT (1 << 5)
# define HDMI_MPEG_INFO_SEND (1 << 8)
# define HDMI_MPEG_INFO_CONT (1 << 9)
#define HDMI_INFOFRAME_CONTROL1 0x7048
# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI_GENERIC_PACKET_CONTROL 0x704c
# define HDMI_GENERIC0_SEND (1 << 0)
# define HDMI_GENERIC0_CONT (1 << 1)
# define HDMI_GENERIC1_SEND (1 << 4)
# define HDMI_GENERIC1_CONT (1 << 5)
# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
#define HDMI_GC 0x7058
# define HDMI_GC_AVMUTE (1 << 0)
# define HDMI_GC_AVMUTE_CONT (1 << 2)
#define AFMT_AUDIO_PACKET_CONTROL2 0x705c
# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
# define AFMT_60958_CS_SOURCE (1 << 4)
# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
#define AFMT_AVI_INFO0 0x7084
# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
# define AFMT_AVI_INFO_Y_RGB 0
# define AFMT_AVI_INFO_Y_YCBCR422 1
# define AFMT_AVI_INFO_Y_YCBCR444 2
# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
#define AFMT_AVI_INFO1 0x7088
# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
# define AFMT_AVI_INFO_CN(x) (((x) & 0x3) << 12)
# define AFMT_AVI_INFO_YQ(x) (((x) & 0x3) << 14)
# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
#define AFMT_AVI_INFO2 0x708c
# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
#define AFMT_AVI_INFO3 0x7090
# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
#define AFMT_MPEG_INFO0 0x7094
# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
#define AFMT_MPEG_INFO1 0x7098
# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
#define AFMT_GENERIC0_HDR 0x709c
#define AFMT_GENERIC0_0 0x70a0
#define AFMT_GENERIC0_1 0x70a4
#define AFMT_GENERIC0_2 0x70a8
#define AFMT_GENERIC0_3 0x70ac
#define AFMT_GENERIC0_4 0x70b0
#define AFMT_GENERIC0_5 0x70b4
#define AFMT_GENERIC0_6 0x70b8
#define AFMT_GENERIC1_HDR 0x70bc
#define AFMT_GENERIC1_0 0x70c0
#define AFMT_GENERIC1_1 0x70c4
#define AFMT_GENERIC1_2 0x70c8
#define AFMT_GENERIC1_3 0x70cc
#define AFMT_GENERIC1_4 0x70d0
#define AFMT_GENERIC1_5 0x70d4
#define AFMT_GENERIC1_6 0x70d8
#define HDMI_ACR_32_0 0x70dc
# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_32_1 0x70e0
# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_44_0 0x70e4
# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_44_1 0x70e8
# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_48_0 0x70ec
# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_48_1 0x70f0
# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_STATUS_0 0x70f4
#define HDMI_ACR_STATUS_1 0x70f8
#define AFMT_AUDIO_INFO0 0x70fc
# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
# define AFMT_AUDIO_INFO_CT(x) (((x) & 0xf) << 11)
# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
# define AFMT_AUDIO_INFO_CXT(x) (((x) & 0x1f) << 24)
#define AFMT_AUDIO_INFO1 0x7100
# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
# define AFMT_AUDIO_INFO_LFEBPL(x) (((x) & 3) << 16)
#define AFMT_60958_0 0x7104
# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
#define AFMT_60958_1 0x7108
# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
#define AFMT_AUDIO_CRC_CONTROL 0x710c
# define AFMT_AUDIO_CRC_EN (1 << 0)
#define AFMT_RAMP_CONTROL0 0x7110
# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
# define AFMT_RAMP_DATA_SIGN (1 << 31)
#define AFMT_RAMP_CONTROL1 0x7114
# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
#define AFMT_RAMP_CONTROL2 0x7118
# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
#define AFMT_RAMP_CONTROL3 0x711c
# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
#define AFMT_60958_2 0x7120
# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
#define AFMT_STATUS 0x7128
# define AFMT_AUDIO_ENABLE (1 << 4)
# define AFMT_AUDIO_HBR_ENABLE (1 << 8)
# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
#define AFMT_AUDIO_PACKET_CONTROL 0x712c
# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
# define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */
# define AFMT_AUDIO_TEST_EN (1 << 12)
# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
# define AFMT_60958_CS_UPDATE (1 << 26)
# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
#define AFMT_VBI_PACKET_CONTROL 0x7130
# define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x7134
# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - afmt regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7138
 
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
434,9 → 194,6
#define NOOFCHAN_MASK 0x00003000
#define MC_SHARED_CHREMAP 0x2008
 
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
#define BLACKOUT_MODE_MASK 0x00000007
 
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
473,7 → 230,6
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
#define MC_VM_MD_L1_TLB3_CNTL 0x2698
 
#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
486,7 → 242,6
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
#define PA_SC_ENHANCE 0x8BF0
#define PA_SC_AA_CONFIG 0x28C04
#define MSAA_NUM_SAMPLES_SHIFT 0
#define MSAA_NUM_SAMPLES_MASK 0x3
514,7 → 269,6
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
 
#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define NUMBER_OF_SETS(x) ((x) << 1)
565,8 → 319,6
#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
#define NUM_HS_GPRS(x) ((x) << 0)
#define NUM_LS_GPRS(x) ((x) << 16)
#define SQ_GLOBAL_GPR_RESOURCE_MGMT_1 0x8C10
#define SQ_GLOBAL_GPR_RESOURCE_MGMT_2 0x8C14
#define SQ_THREAD_RESOURCE_MGMT 0x8C18
#define NUM_PS_THREADS(x) ((x) << 0)
#define NUM_VS_THREADS(x) ((x) << 8)
585,10 → 337,6
#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
#define SQ_DYN_GPR_SIMD_LOCK_EN 0x8D94
#define SQ_STATIC_THREAD_MGMT_1 0x8E20
#define SQ_STATIC_THREAD_MGMT_2 0x8E24
#define SQ_STATIC_THREAD_MGMT_3 0x8E28
#define SQ_LDS_RESOURCE_MGMT 0x8E2C
 
#define SQ_MS_FIFO_SIZES 0x8CF0
943,7 → 691,6
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
1021,8 → 768,6
#define SQ_TEX_VTX_VALID_TEXTURE 0x2
#define SQ_TEX_VTX_VALID_BUFFER 0x3
 
#define VGT_VTX_VECT_EJECT_REG 0x88b0
 
#define SQ_CONST_MEM_BASE 0x8df8
 
#define SQ_ESGS_RING_BASE 0x8c40
1147,162 → 892,19
#define PA_SC_SCREEN_SCISSOR_TL 0x28030
#define PA_SC_GENERIC_SCISSOR_TL 0x28240
#define PA_SC_WINDOW_SCISSOR_TL 0x28204
 
#define VGT_PRIMITIVE_TYPE 0x8958
#define VGT_INDEX_TYPE 0x895C
 
#define VGT_NUM_INDICES 0x8970
 
#define VGT_COMPUTE_DIM_X 0x8990
#define VGT_COMPUTE_DIM_Y 0x8994
#define VGT_COMPUTE_DIM_Z 0x8998
#define VGT_COMPUTE_START_X 0x899C
#define VGT_COMPUTE_START_Y 0x89A0
#define VGT_COMPUTE_START_Z 0x89A4
#define VGT_COMPUTE_INDEX 0x89A8
#define VGT_COMPUTE_THREAD_GROUP_SIZE 0x89AC
#define VGT_HS_OFFCHIP_PARAM 0x89B0
 
#define DB_DEBUG 0x9830
#define DB_DEBUG2 0x9834
#define DB_DEBUG3 0x9838
#define DB_DEBUG4 0x983C
#define DB_WATERMARKS 0x9854
#define DB_DEPTH_CONTROL 0x28800
#define R_028800_DB_DEPTH_CONTROL 0x028800
#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
#define C_028800_Z_ENABLE 0xFFFFFFFD
#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
#define C_028800_ZFUNC 0xFFFFFF8F
#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
#define C_028800_STENCILFUNC 0xFFFFF8FF
#define V_028800_STENCILFUNC_NEVER 0x00000000
#define V_028800_STENCILFUNC_LESS 0x00000001
#define V_028800_STENCILFUNC_EQUAL 0x00000002
#define V_028800_STENCILFUNC_LEQUAL 0x00000003
#define V_028800_STENCILFUNC_GREATER 0x00000004
#define V_028800_STENCILFUNC_NOTEQUAL 0x00000005
#define V_028800_STENCILFUNC_GEQUAL 0x00000006
#define V_028800_STENCILFUNC_ALWAYS 0x00000007
#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
#define C_028800_STENCILFAIL 0xFFFFC7FF
#define V_028800_STENCIL_KEEP 0x00000000
#define V_028800_STENCIL_ZERO 0x00000001
#define V_028800_STENCIL_REPLACE 0x00000002
#define V_028800_STENCIL_INCR 0x00000003
#define V_028800_STENCIL_DECR 0x00000004
#define V_028800_STENCIL_INVERT 0x00000005
#define V_028800_STENCIL_INCR_WRAP 0x00000006
#define V_028800_STENCIL_DECR_WRAP 0x00000007
#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
#define C_028800_STENCILZPASS 0xFFFE3FFF
#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
#define C_028800_STENCILZFAIL 0xFFF1FFFF
#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
#define DB_DEPTH_VIEW 0x28008
#define R_028008_DB_DEPTH_VIEW 0x00028008
#define S_028008_SLICE_START(x) (((x) & 0x7FF) << 0)
#define G_028008_SLICE_START(x) (((x) >> 0) & 0x7FF)
#define C_028008_SLICE_START 0xFFFFF800
#define S_028008_SLICE_MAX(x) (((x) & 0x7FF) << 13)
#define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
#define C_028008_SLICE_MAX 0xFF001FFF
#define DB_HTILE_DATA_BASE 0x28014
#define DB_HTILE_SURFACE 0x28abc
#define S_028ABC_HTILE_WIDTH(x) (((x) & 0x1) << 0)
#define G_028ABC_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
#define C_028ABC_HTILE_WIDTH 0xFFFFFFFE
#define S_028ABC_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
#define G_028ABC_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
#define C_028ABC_HTILE_HEIGHT 0xFFFFFFFD
#define G_028ABC_LINEAR(x) (((x) >> 2) & 0x1)
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
# define DB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
#define R_028040_DB_Z_INFO 0x028040
#define S_028040_FORMAT(x) (((x) & 0x3) << 0)
#define G_028040_FORMAT(x) (((x) >> 0) & 0x3)
#define C_028040_FORMAT 0xFFFFFFFC
#define V_028040_Z_INVALID 0x00000000
#define V_028040_Z_16 0x00000001
#define V_028040_Z_24 0x00000002
#define V_028040_Z_32_FLOAT 0x00000003
#define S_028040_ARRAY_MODE(x) (((x) & 0xF) << 4)
#define G_028040_ARRAY_MODE(x) (((x) >> 4) & 0xF)
#define C_028040_ARRAY_MODE 0xFFFFFF0F
#define S_028040_READ_SIZE(x) (((x) & 0x1) << 28)
#define G_028040_READ_SIZE(x) (((x) >> 28) & 0x1)
#define C_028040_READ_SIZE 0xEFFFFFFF
#define S_028040_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 29)
#define G_028040_TILE_SURFACE_ENABLE(x) (((x) >> 29) & 0x1)
#define C_028040_TILE_SURFACE_ENABLE 0xDFFFFFFF
#define S_028040_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
#define G_028040_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
#define C_028040_ZRANGE_PRECISION 0x7FFFFFFF
#define S_028040_TILE_SPLIT(x) (((x) & 0x7) << 8)
#define G_028040_TILE_SPLIT(x) (((x) >> 8) & 0x7)
#define S_028040_NUM_BANKS(x) (((x) & 0x3) << 12)
#define G_028040_NUM_BANKS(x) (((x) >> 12) & 0x3)
#define S_028040_BANK_WIDTH(x) (((x) & 0x3) << 16)
#define G_028040_BANK_WIDTH(x) (((x) >> 16) & 0x3)
#define S_028040_BANK_HEIGHT(x) (((x) & 0x3) << 20)
#define G_028040_BANK_HEIGHT(x) (((x) >> 20) & 0x3)
#define S_028040_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
#define G_028040_MACRO_TILE_ASPECT(x) (((x) >> 24) & 0x3)
#define DB_STENCIL_INFO 0x28044
#define R_028044_DB_STENCIL_INFO 0x028044
#define S_028044_FORMAT(x) (((x) & 0x1) << 0)
#define G_028044_FORMAT(x) (((x) >> 0) & 0x1)
#define C_028044_FORMAT 0xFFFFFFFE
#define V_028044_STENCIL_INVALID 0
#define V_028044_STENCIL_8 1
#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7)
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
#define DB_Z_WRITE_BASE 0x28050
#define DB_STENCIL_WRITE_BASE 0x28054
#define DB_DEPTH_SIZE 0x28058
#define R_028058_DB_DEPTH_SIZE 0x028058
#define S_028058_PITCH_TILE_MAX(x) (((x) & 0x7FF) << 0)
#define G_028058_PITCH_TILE_MAX(x) (((x) >> 0) & 0x7FF)
#define C_028058_PITCH_TILE_MAX 0xFFFFF800
#define S_028058_HEIGHT_TILE_MAX(x) (((x) & 0x7FF) << 11)
#define G_028058_HEIGHT_TILE_MAX(x) (((x) >> 11) & 0x7FF)
#define C_028058_HEIGHT_TILE_MAX 0xFFC007FF
#define R_02805C_DB_DEPTH_SLICE 0x02805C
#define S_02805C_SLICE_TILE_MAX(x) (((x) & 0x3FFFFF) << 0)
#define G_02805C_SLICE_TILE_MAX(x) (((x) >> 0) & 0x3FFFFF)
#define C_02805C_SLICE_TILE_MAX 0xFFC00000
 
#define SQ_PGM_START_PS 0x28840
#define SQ_PGM_START_VS 0x2885c
1312,14 → 914,6
#define SQ_PGM_START_HS 0x288b8
#define SQ_PGM_START_LS 0x288d0
 
#define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8
#define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8
#define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8
#define VGT_STRMOUT_BUFFER_BASE_3 0x28B08
#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
#define VGT_STRMOUT_CONFIG 0x28b94
#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
 
1346,163 → 940,13
#define CB_COLOR0_PITCH 0x28c64
#define CB_COLOR0_SLICE 0x28c68
#define CB_COLOR0_VIEW 0x28c6c
#define R_028C6C_CB_COLOR0_VIEW 0x00028C6C
#define S_028C6C_SLICE_START(x) (((x) & 0x7FF) << 0)
#define G_028C6C_SLICE_START(x) (((x) >> 0) & 0x7FF)
#define C_028C6C_SLICE_START 0xFFFFF800
#define S_028C6C_SLICE_MAX(x) (((x) & 0x7FF) << 13)
#define G_028C6C_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
#define C_028C6C_SLICE_MAX 0xFF001FFF
#define R_028C70_CB_COLOR0_INFO 0x028C70
#define S_028C70_ENDIAN(x) (((x) & 0x3) << 0)
#define G_028C70_ENDIAN(x) (((x) >> 0) & 0x3)
#define C_028C70_ENDIAN 0xFFFFFFFC
#define S_028C70_FORMAT(x) (((x) & 0x3F) << 2)
#define G_028C70_FORMAT(x) (((x) >> 2) & 0x3F)
#define C_028C70_FORMAT 0xFFFFFF03
#define V_028C70_COLOR_INVALID 0x00000000
#define V_028C70_COLOR_8 0x00000001
#define V_028C70_COLOR_4_4 0x00000002
#define V_028C70_COLOR_3_3_2 0x00000003
#define V_028C70_COLOR_16 0x00000005
#define V_028C70_COLOR_16_FLOAT 0x00000006
#define V_028C70_COLOR_8_8 0x00000007
#define V_028C70_COLOR_5_6_5 0x00000008
#define V_028C70_COLOR_6_5_5 0x00000009
#define V_028C70_COLOR_1_5_5_5 0x0000000A
#define V_028C70_COLOR_4_4_4_4 0x0000000B
#define V_028C70_COLOR_5_5_5_1 0x0000000C
#define V_028C70_COLOR_32 0x0000000D
#define V_028C70_COLOR_32_FLOAT 0x0000000E
#define V_028C70_COLOR_16_16 0x0000000F
#define V_028C70_COLOR_16_16_FLOAT 0x00000010
#define V_028C70_COLOR_8_24 0x00000011
#define V_028C70_COLOR_8_24_FLOAT 0x00000012
#define V_028C70_COLOR_24_8 0x00000013
#define V_028C70_COLOR_24_8_FLOAT 0x00000014
#define V_028C70_COLOR_10_11_11 0x00000015
#define V_028C70_COLOR_10_11_11_FLOAT 0x00000016
#define V_028C70_COLOR_11_11_10 0x00000017
#define V_028C70_COLOR_11_11_10_FLOAT 0x00000018
#define V_028C70_COLOR_2_10_10_10 0x00000019
#define V_028C70_COLOR_8_8_8_8 0x0000001A
#define V_028C70_COLOR_10_10_10_2 0x0000001B
#define V_028C70_COLOR_X24_8_32_FLOAT 0x0000001C
#define V_028C70_COLOR_32_32 0x0000001D
#define V_028C70_COLOR_32_32_FLOAT 0x0000001E
#define V_028C70_COLOR_16_16_16_16 0x0000001F
#define V_028C70_COLOR_16_16_16_16_FLOAT 0x00000020
#define V_028C70_COLOR_32_32_32_32 0x00000022
#define V_028C70_COLOR_32_32_32_32_FLOAT 0x00000023
#define V_028C70_COLOR_32_32_32_FLOAT 0x00000030
#define S_028C70_ARRAY_MODE(x) (((x) & 0xF) << 8)
#define G_028C70_ARRAY_MODE(x) (((x) >> 8) & 0xF)
#define C_028C70_ARRAY_MODE 0xFFFFF0FF
#define V_028C70_ARRAY_LINEAR_GENERAL 0x00000000
#define V_028C70_ARRAY_LINEAR_ALIGNED 0x00000001
#define V_028C70_ARRAY_1D_TILED_THIN1 0x00000002
#define V_028C70_ARRAY_2D_TILED_THIN1 0x00000004
#define S_028C70_NUMBER_TYPE(x) (((x) & 0x7) << 12)
#define G_028C70_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
#define C_028C70_NUMBER_TYPE 0xFFFF8FFF
#define V_028C70_NUMBER_UNORM 0x00000000
#define V_028C70_NUMBER_SNORM 0x00000001
#define V_028C70_NUMBER_USCALED 0x00000002
#define V_028C70_NUMBER_SSCALED 0x00000003
#define V_028C70_NUMBER_UINT 0x00000004
#define V_028C70_NUMBER_SINT 0x00000005
#define V_028C70_NUMBER_SRGB 0x00000006
#define V_028C70_NUMBER_FLOAT 0x00000007
#define S_028C70_COMP_SWAP(x) (((x) & 0x3) << 15)
#define G_028C70_COMP_SWAP(x) (((x) >> 15) & 0x3)
#define C_028C70_COMP_SWAP 0xFFFE7FFF
#define V_028C70_SWAP_STD 0x00000000
#define V_028C70_SWAP_ALT 0x00000001
#define V_028C70_SWAP_STD_REV 0x00000002
#define V_028C70_SWAP_ALT_REV 0x00000003
#define S_028C70_FAST_CLEAR(x) (((x) & 0x1) << 17)
#define G_028C70_FAST_CLEAR(x) (((x) >> 17) & 0x1)
#define C_028C70_FAST_CLEAR 0xFFFDFFFF
#define S_028C70_COMPRESSION(x) (((x) & 0x3) << 18)
#define G_028C70_COMPRESSION(x) (((x) >> 18) & 0x3)
#define C_028C70_COMPRESSION 0xFFF3FFFF
#define S_028C70_BLEND_CLAMP(x) (((x) & 0x1) << 19)
#define G_028C70_BLEND_CLAMP(x) (((x) >> 19) & 0x1)
#define C_028C70_BLEND_CLAMP 0xFFF7FFFF
#define S_028C70_BLEND_BYPASS(x) (((x) & 0x1) << 20)
#define G_028C70_BLEND_BYPASS(x) (((x) >> 20) & 0x1)
#define C_028C70_BLEND_BYPASS 0xFFEFFFFF
#define S_028C70_SIMPLE_FLOAT(x) (((x) & 0x1) << 21)
#define G_028C70_SIMPLE_FLOAT(x) (((x) >> 21) & 0x1)
#define C_028C70_SIMPLE_FLOAT 0xFFDFFFFF
#define S_028C70_ROUND_MODE(x) (((x) & 0x1) << 22)
#define G_028C70_ROUND_MODE(x) (((x) >> 22) & 0x1)
#define C_028C70_ROUND_MODE 0xFFBFFFFF
#define S_028C70_TILE_COMPACT(x) (((x) & 0x1) << 23)
#define G_028C70_TILE_COMPACT(x) (((x) >> 23) & 0x1)
#define C_028C70_TILE_COMPACT 0xFF7FFFFF
#define S_028C70_SOURCE_FORMAT(x) (((x) & 0x3) << 24)
#define G_028C70_SOURCE_FORMAT(x) (((x) >> 24) & 0x3)
#define C_028C70_SOURCE_FORMAT 0xFCFFFFFF
#define V_028C70_EXPORT_4C_32BPC 0x0
#define V_028C70_EXPORT_4C_16BPC 0x1
#define V_028C70_EXPORT_2C_32BPC 0x2 /* Do not use */
#define S_028C70_RAT(x) (((x) & 0x1) << 26)
#define G_028C70_RAT(x) (((x) >> 26) & 0x1)
#define C_028C70_RAT 0xFBFFFFFF
#define S_028C70_RESOURCE_TYPE(x) (((x) & 0x7) << 27)
#define G_028C70_RESOURCE_TYPE(x) (((x) >> 27) & 0x7)
#define C_028C70_RESOURCE_TYPE 0xC7FFFFFF
 
#define CB_COLOR0_INFO 0x28c70
# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
# define ARRAY_LINEAR_GENERAL 0
# define ARRAY_LINEAR_ALIGNED 1
# define ARRAY_1D_TILED_THIN1 2
# define ARRAY_2D_TILED_THIN1 4
# define CB_SOURCE_FORMAT(x) ((x) << 24)
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define R_028C74_CB_COLOR0_ATTRIB 0x028C74
#define S_028C74_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 4)
#define G_028C74_NON_DISP_TILING_ORDER(x) (((x) >> 4) & 0x1)
#define C_028C74_NON_DISP_TILING_ORDER 0xFFFFFFEF
#define S_028C74_TILE_SPLIT(x) (((x) & 0xf) << 5)
#define G_028C74_TILE_SPLIT(x) (((x) >> 5) & 0xf)
#define S_028C74_NUM_BANKS(x) (((x) & 0x3) << 10)
#define G_028C74_NUM_BANKS(x) (((x) >> 10) & 0x3)
#define S_028C74_BANK_WIDTH(x) (((x) & 0x3) << 13)
#define G_028C74_BANK_WIDTH(x) (((x) >> 13) & 0x3)
#define S_028C74_BANK_HEIGHT(x) (((x) & 0x3) << 16)
#define G_028C74_BANK_HEIGHT(x) (((x) >> 16) & 0x3)
#define S_028C74_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
#define G_028C74_MACRO_TILE_ASPECT(x) (((x) >> 19) & 0x3)
#define CB_COLOR0_ATTRIB 0x28c74
# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
# define ADDR_SURF_TILE_SPLIT_64B 0
# define ADDR_SURF_TILE_SPLIT_128B 1
# define ADDR_SURF_TILE_SPLIT_256B 2
# define ADDR_SURF_TILE_SPLIT_512B 3
# define ADDR_SURF_TILE_SPLIT_1KB 4
# define ADDR_SURF_TILE_SPLIT_2KB 5
# define ADDR_SURF_TILE_SPLIT_4KB 6
# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
# define ADDR_SURF_2_BANK 0
# define ADDR_SURF_4_BANK 1
# define ADDR_SURF_8_BANK 2
# define ADDR_SURF_16_BANK 3
# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
# define ADDR_SURF_BANK_WIDTH_1 0
# define ADDR_SURF_BANK_WIDTH_2 1
# define ADDR_SURF_BANK_WIDTH_4 2
# define ADDR_SURF_BANK_WIDTH_8 3
# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
# define ADDR_SURF_BANK_HEIGHT_1 0
# define ADDR_SURF_BANK_HEIGHT_2 1
# define ADDR_SURF_BANK_HEIGHT_4 2
# define ADDR_SURF_BANK_HEIGHT_8 3
# define CB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
1663,226 → 1107,17
#define CB_COLOR7_CLEAR_WORD3 0x28e3c
 
#define SQ_TEX_RESOURCE_WORD0_0 0x30000
# define TEX_DIM(x) ((x) << 0)
# define SQ_TEX_DIM_1D 0
# define SQ_TEX_DIM_2D 1
# define SQ_TEX_DIM_3D 2
# define SQ_TEX_DIM_CUBEMAP 3
# define SQ_TEX_DIM_1D_ARRAY 4
# define SQ_TEX_DIM_2D_ARRAY 5
# define SQ_TEX_DIM_2D_MSAA 6
# define SQ_TEX_DIM_2D_ARRAY_MSAA 7
#define SQ_TEX_RESOURCE_WORD1_0 0x30004
# define TEX_ARRAY_MODE(x) ((x) << 28)
#define SQ_TEX_RESOURCE_WORD2_0 0x30008
#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
#define SQ_TEX_RESOURCE_WORD4_0 0x30010
# define TEX_DST_SEL_X(x) ((x) << 16)
# define TEX_DST_SEL_Y(x) ((x) << 19)
# define TEX_DST_SEL_Z(x) ((x) << 22)
# define TEX_DST_SEL_W(x) ((x) << 25)
# define SQ_SEL_X 0
# define SQ_SEL_Y 1
# define SQ_SEL_Z 2
# define SQ_SEL_W 3
# define SQ_SEL_0 4
# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
# define MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
#define R_030000_SQ_TEX_RESOURCE_WORD0_0 0x030000
#define S_030000_DIM(x) (((x) & 0x7) << 0)
#define G_030000_DIM(x) (((x) >> 0) & 0x7)
#define C_030000_DIM 0xFFFFFFF8
#define V_030000_SQ_TEX_DIM_1D 0x00000000
#define V_030000_SQ_TEX_DIM_2D 0x00000001
#define V_030000_SQ_TEX_DIM_3D 0x00000002
#define V_030000_SQ_TEX_DIM_CUBEMAP 0x00000003
#define V_030000_SQ_TEX_DIM_1D_ARRAY 0x00000004
#define V_030000_SQ_TEX_DIM_2D_ARRAY 0x00000005
#define V_030000_SQ_TEX_DIM_2D_MSAA 0x00000006
#define V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
#define S_030000_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 5)
#define G_030000_NON_DISP_TILING_ORDER(x) (((x) >> 5) & 0x1)
#define C_030000_NON_DISP_TILING_ORDER 0xFFFFFFDF
#define S_030000_PITCH(x) (((x) & 0xFFF) << 6)
#define G_030000_PITCH(x) (((x) >> 6) & 0xFFF)
#define C_030000_PITCH 0xFFFC003F
#define S_030000_TEX_WIDTH(x) (((x) & 0x3FFF) << 18)
#define G_030000_TEX_WIDTH(x) (((x) >> 18) & 0x3FFF)
#define C_030000_TEX_WIDTH 0x0003FFFF
#define R_030004_SQ_TEX_RESOURCE_WORD1_0 0x030004
#define S_030004_TEX_HEIGHT(x) (((x) & 0x3FFF) << 0)
#define G_030004_TEX_HEIGHT(x) (((x) >> 0) & 0x3FFF)
#define C_030004_TEX_HEIGHT 0xFFFFC000
#define S_030004_TEX_DEPTH(x) (((x) & 0x1FFF) << 14)
#define G_030004_TEX_DEPTH(x) (((x) >> 14) & 0x1FFF)
#define C_030004_TEX_DEPTH 0xF8003FFF
#define S_030004_ARRAY_MODE(x) (((x) & 0xF) << 28)
#define G_030004_ARRAY_MODE(x) (((x) >> 28) & 0xF)
#define C_030004_ARRAY_MODE 0x0FFFFFFF
#define R_030008_SQ_TEX_RESOURCE_WORD2_0 0x030008
#define S_030008_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
#define G_030008_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_030008_BASE_ADDRESS 0x00000000
#define R_03000C_SQ_TEX_RESOURCE_WORD3_0 0x03000C
#define S_03000C_MIP_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
#define G_03000C_MIP_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_03000C_MIP_ADDRESS 0x00000000
#define R_030010_SQ_TEX_RESOURCE_WORD4_0 0x030010
#define S_030010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
#define G_030010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
#define C_030010_FORMAT_COMP_X 0xFFFFFFFC
#define V_030010_SQ_FORMAT_COMP_UNSIGNED 0x00000000
#define V_030010_SQ_FORMAT_COMP_SIGNED 0x00000001
#define V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED 0x00000002
#define S_030010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
#define G_030010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
#define C_030010_FORMAT_COMP_Y 0xFFFFFFF3
#define S_030010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
#define G_030010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
#define C_030010_FORMAT_COMP_Z 0xFFFFFFCF
#define S_030010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
#define G_030010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
#define C_030010_FORMAT_COMP_W 0xFFFFFF3F
#define S_030010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
#define G_030010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
#define C_030010_NUM_FORMAT_ALL 0xFFFFFCFF
#define V_030010_SQ_NUM_FORMAT_NORM 0x00000000
#define V_030010_SQ_NUM_FORMAT_INT 0x00000001
#define V_030010_SQ_NUM_FORMAT_SCALED 0x00000002
#define S_030010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
#define G_030010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
#define C_030010_SRF_MODE_ALL 0xFFFFFBFF
#define V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE 0x00000000
#define V_030010_SRF_MODE_NO_ZERO 0x00000001
#define S_030010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
#define G_030010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
#define C_030010_FORCE_DEGAMMA 0xFFFFF7FF
#define S_030010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
#define G_030010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
#define C_030010_ENDIAN_SWAP 0xFFFFCFFF
#define S_030010_DST_SEL_X(x) (((x) & 0x7) << 16)
#define G_030010_DST_SEL_X(x) (((x) >> 16) & 0x7)
#define C_030010_DST_SEL_X 0xFFF8FFFF
#define V_030010_SQ_SEL_X 0x00000000
#define V_030010_SQ_SEL_Y 0x00000001
#define V_030010_SQ_SEL_Z 0x00000002
#define V_030010_SQ_SEL_W 0x00000003
#define V_030010_SQ_SEL_0 0x00000004
#define V_030010_SQ_SEL_1 0x00000005
#define S_030010_DST_SEL_Y(x) (((x) & 0x7) << 19)
#define G_030010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
#define C_030010_DST_SEL_Y 0xFFC7FFFF
#define S_030010_DST_SEL_Z(x) (((x) & 0x7) << 22)
#define G_030010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
#define C_030010_DST_SEL_Z 0xFE3FFFFF
#define S_030010_DST_SEL_W(x) (((x) & 0x7) << 25)
#define G_030010_DST_SEL_W(x) (((x) >> 25) & 0x7)
#define C_030010_DST_SEL_W 0xF1FFFFFF
#define S_030010_BASE_LEVEL(x) (((x) & 0xF) << 28)
#define G_030010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
#define C_030010_BASE_LEVEL 0x0FFFFFFF
#define R_030014_SQ_TEX_RESOURCE_WORD5_0 0x030014
#define S_030014_LAST_LEVEL(x) (((x) & 0xF) << 0)
#define G_030014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
#define C_030014_LAST_LEVEL 0xFFFFFFF0
#define S_030014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
#define G_030014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
#define C_030014_BASE_ARRAY 0xFFFE000F
#define S_030014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
#define G_030014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
#define C_030014_LAST_ARRAY 0xC001FFFF
#define R_030018_SQ_TEX_RESOURCE_WORD6_0 0x030018
#define S_030018_MAX_ANISO(x) (((x) & 0x7) << 0)
#define G_030018_MAX_ANISO(x) (((x) >> 0) & 0x7)
#define C_030018_MAX_ANISO 0xFFFFFFF8
#define S_030018_PERF_MODULATION(x) (((x) & 0x7) << 3)
#define G_030018_PERF_MODULATION(x) (((x) >> 3) & 0x7)
#define C_030018_PERF_MODULATION 0xFFFFFFC7
#define S_030018_INTERLACED(x) (((x) & 0x1) << 6)
#define G_030018_INTERLACED(x) (((x) >> 6) & 0x1)
#define C_030018_INTERLACED 0xFFFFFFBF
#define S_030018_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define G_030018_TILE_SPLIT(x) (((x) >> 29) & 0x7)
#define R_03001C_SQ_TEX_RESOURCE_WORD7_0 0x03001C
#define S_03001C_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
#define G_03001C_MACRO_TILE_ASPECT(x) (((x) >> 6) & 0x3)
#define S_03001C_BANK_WIDTH(x) (((x) & 0x3) << 8)
#define G_03001C_BANK_WIDTH(x) (((x) >> 8) & 0x3)
#define S_03001C_BANK_HEIGHT(x) (((x) & 0x3) << 10)
#define G_03001C_BANK_HEIGHT(x) (((x) >> 10) & 0x3)
#define S_03001C_NUM_BANKS(x) (((x) & 0x3) << 16)
#define G_03001C_NUM_BANKS(x) (((x) >> 16) & 0x3)
#define S_03001C_TYPE(x) (((x) & 0x3) << 30)
#define G_03001C_TYPE(x) (((x) >> 30) & 0x3)
#define C_03001C_TYPE 0x3FFFFFFF
#define V_03001C_SQ_TEX_VTX_INVALID_TEXTURE 0x00000000
#define V_03001C_SQ_TEX_VTX_INVALID_BUFFER 0x00000001
#define V_03001C_SQ_TEX_VTX_VALID_TEXTURE 0x00000002
#define V_03001C_SQ_TEX_VTX_VALID_BUFFER 0x00000003
#define S_03001C_DATA_FORMAT(x) (((x) & 0x3F) << 0)
#define G_03001C_DATA_FORMAT(x) (((x) >> 0) & 0x3F)
#define C_03001C_DATA_FORMAT 0xFFFFFFC0
 
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
#define SQ_VTX_CONSTANT_WORD2_0 0x30008
# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
# define SQ_VTXC_STRIDE(x) ((x) << 8)
# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
# define SQ_ENDIAN_NONE 0
# define SQ_ENDIAN_8IN16 1
# define SQ_ENDIAN_8IN32 2
#define SQ_VTX_CONSTANT_WORD3_0 0x3000C
# define SQ_VTCX_SEL_X(x) ((x) << 3)
# define SQ_VTCX_SEL_Y(x) ((x) << 6)
# define SQ_VTCX_SEL_Z(x) ((x) << 9)
# define SQ_VTCX_SEL_W(x) ((x) << 12)
#define SQ_VTX_CONSTANT_WORD4_0 0x30010
#define SQ_VTX_CONSTANT_WORD5_0 0x30014
#define SQ_VTX_CONSTANT_WORD6_0 0x30018
#define SQ_VTX_CONSTANT_WORD7_0 0x3001c
 
#define TD_PS_BORDER_COLOR_INDEX 0xA400
#define TD_PS_BORDER_COLOR_RED 0xA404
#define TD_PS_BORDER_COLOR_GREEN 0xA408
#define TD_PS_BORDER_COLOR_BLUE 0xA40C
#define TD_PS_BORDER_COLOR_ALPHA 0xA410
#define TD_VS_BORDER_COLOR_INDEX 0xA414
#define TD_VS_BORDER_COLOR_RED 0xA418
#define TD_VS_BORDER_COLOR_GREEN 0xA41C
#define TD_VS_BORDER_COLOR_BLUE 0xA420
#define TD_VS_BORDER_COLOR_ALPHA 0xA424
#define TD_GS_BORDER_COLOR_INDEX 0xA428
#define TD_GS_BORDER_COLOR_RED 0xA42C
#define TD_GS_BORDER_COLOR_GREEN 0xA430
#define TD_GS_BORDER_COLOR_BLUE 0xA434
#define TD_GS_BORDER_COLOR_ALPHA 0xA438
#define TD_HS_BORDER_COLOR_INDEX 0xA43C
#define TD_HS_BORDER_COLOR_RED 0xA440
#define TD_HS_BORDER_COLOR_GREEN 0xA444
#define TD_HS_BORDER_COLOR_BLUE 0xA448
#define TD_HS_BORDER_COLOR_ALPHA 0xA44C
#define TD_LS_BORDER_COLOR_INDEX 0xA450
#define TD_LS_BORDER_COLOR_RED 0xA454
#define TD_LS_BORDER_COLOR_GREEN 0xA458
#define TD_LS_BORDER_COLOR_BLUE 0xA45C
#define TD_LS_BORDER_COLOR_ALPHA 0xA460
#define TD_CS_BORDER_COLOR_INDEX 0xA464
#define TD_CS_BORDER_COLOR_RED 0xA468
#define TD_CS_BORDER_COLOR_GREEN 0xA46C
#define TD_CS_BORDER_COLOR_BLUE 0xA470
#define TD_CS_BORDER_COLOR_ALPHA 0xA474
 
/* cayman 3D regs */
#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B4
#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS 0x8E48
#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
#define CAYMAN_DB_EQAA 0x28804
#define CAYMAN_DB_DEPTH_INFO 0x2803C
#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
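 
/* Usage convention for the S_/G_/C_ triads above -- S_* shifts a value
 * into its field, G_* extracts it, C_* is the AND-mask that clears it.
 * A hypothetical read-modify-write of the depth compare function, for
 * illustration (ZFUNC values follow the STENCILFUNC encoding above;
 * 3 = LEQUAL):
 *
 *	u32 db = RREG32(R_028800_DB_DEPTH_CONTROL);
 *	db &= C_028800_ZFUNC;
 *	db |= S_028800_ZFUNC(3);
 *	WREG32(R_028800_DB_DEPTH_CONTROL, db);
 */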
/drivers/video/drm/radeon/ni.c
24,11 → 24,10
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "radeon_drm.h"
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
40,10 → 39,6
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);
 
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
55,8 → 50,6
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037
 
#define ARUBA_RLC_UCODE_SIZE 1536
 
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
72,9 → 65,6
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
 
#define BTC_IO_MC_REGS_SIZE 29
 
269,11 → 259,8
WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
 
/* wait for training to complete */
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
break;
udelay(1);
}
while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
udelay(10);
 
if (running)
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
333,15 → 320,6
rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
break;
case CHIP_ARUBA:
chip_name = "ARUBA";
rlc_chip_name = "ARUBA";
/* pfp/me same size as CAYMAN */
pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
mc_req_size = 0;
break;
default: BUG();
}
 
381,8 → 359,6
err = -EINVAL;
}
 
/* no MC ucode on TN */
if (!(rdev->flags & RADEON_IS_IGP)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
if (err)
393,7 → 369,6
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
}
out:
platform_device_unregister(pdev);
 
417,21 → 392,249
/*
* Core functions
*/
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends_per_asic,
u32 *backend_disable_mask_per_asic,
u32 num_shader_engines)
{
u32 backend_map = 0;
u32 enabled_backends_mask = 0;
u32 enabled_backends_count = 0;
u32 num_backends_per_se;
u32 cur_pipe;
u32 swizzle_pipe[CAYMAN_MAX_PIPES];
u32 cur_backend = 0;
u32 i;
bool force_no_swizzle;
 
/* force legal values */
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
num_tile_pipes = rdev->config.cayman.max_tile_pipes;
if (num_shader_engines < 1)
num_shader_engines = 1;
if (num_shader_engines > rdev->config.cayman.max_shader_engines)
num_shader_engines = rdev->config.cayman.max_shader_engines;
if (num_backends_per_asic < num_shader_engines)
num_backends_per_asic = num_shader_engines;
if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
 
/* make sure we have the same number of backends per se */
num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
/* set up the number of backends per se */
num_backends_per_se = num_backends_per_asic / num_shader_engines;
if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
num_backends_per_se = rdev->config.cayman.max_backends_per_se;
num_backends_per_asic = num_backends_per_se * num_shader_engines;
}
 
/* create enable mask and count for enabled backends */
for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends_per_asic)
break;
}
 
/* force the backends mask to match the current number of backends */
if (enabled_backends_count != num_backends_per_asic) {
u32 this_backend_enabled;
u32 shader_engine;
u32 backend_per_se;
 
enabled_backends_mask = 0;
enabled_backends_count = 0;
*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
/* calc the current se */
shader_engine = i / rdev->config.cayman.max_backends_per_se;
/* calc the backend per se */
backend_per_se = i % rdev->config.cayman.max_backends_per_se;
/* default to not enabled */
this_backend_enabled = 0;
if ((shader_engine < num_shader_engines) &&
(backend_per_se < num_backends_per_se))
this_backend_enabled = 1;
if (this_backend_enabled) {
enabled_backends_mask |= (1 << i);
*backend_disable_mask_per_asic &= ~(1 << i);
++enabled_backends_count;
}
}
}
 
 
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
switch (rdev->family) {
case CHIP_CAYMAN:
force_no_swizzle = true;
break;
default:
force_no_swizzle = false;
break;
}
if (force_no_swizzle) {
bool last_backend_enabled = false;
 
force_no_swizzle = false;
for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
if (((enabled_backends_mask >> i) & 1) == 1) {
if (last_backend_enabled)
force_no_swizzle = true;
last_backend_enabled = true;
} else
last_backend_enabled = false;
}
}
 
switch (num_tile_pipes) {
case 1:
case 3:
case 5:
case 7:
DRM_ERROR("odd number of pipes!\n");
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
swizzle_pipe[3] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
}
break;
}
 
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
 
backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
 
cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
}
 
return backend_map;
}
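 
/* Worked example of the nibble packing above, with hypothetical values:
 * num_tile_pipes = 4, swizzle_pipe = {0, 2, 1, 3}, backends 0 and 1
 * enabled. Nibble k of backend_map holds the backend serving tile pipe
 * k; backends are handed out round-robin in swizzled pipe order, so
 * nibble 0 (pipe 0) = 0, nibble 2 (pipe 2) = 1, nibble 1 (pipe 1) = 0,
 * nibble 3 (pipe 3) = 1, giving backend_map = 0x00001100. */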
 
static void cayman_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
 
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
case 2:
case 3:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
}
 
switch (rdev->family) {
case CHIP_CAYMAN:
default:
//tcp_chan_steer_lo = 0x54763210
tcp_chan_steer_lo = 0x76543210;
tcp_chan_steer_hi = 0x0000ba98;
break;
}
 
WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
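 
/* One reading of the steering values: nibble i of
 * {TCP_CHAN_STEER_HI:TCP_CHAN_STEER_LO} selects the physical memory
 * channel used for logical channel i, so 0x0000ba98 / 0x76543210
 * programs the identity mapping -- Cayman keeps the default routing. */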
 
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
u32 disable_mask_per_se,
u32 max_disable_mask_per_se,
u32 num_shader_engines)
{
u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
 
if (num_shader_engines == 1)
return disable_mask_per_asic;
else if (num_shader_engines == 2)
return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
else
return 0xffffffff;
}
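 
/* Hypothetical example: disable_mask_per_se = 0x1 (one backend masked
 * off per SE) with num_shader_engines = 2 gives a field width of one
 * bit, so the ASIC-wide mask is 0x1 | (0x1 << 1) = 0x3 -- the per-SE
 * mask replicated once per shader engine. */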
 
static void cayman_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 gb_backend_map;
u32 cgts_tcc_disable;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 gc_user_shader_pipe_config;
u32 gc_user_rb_backend_disable;
u32 cgts_user_tcc_disable;
u32 cgts_sm_ctrl_reg;
u32 hdp_host_path_cntl;
u32 tmp;
u32 disabled_rb_mask;
int i, j;
 
switch (rdev->family) {
case CHIP_CAYMAN:
default:
rdev->config.cayman.max_shader_engines = 2;
rdev->config.cayman.max_pipes_per_simd = 4;
rdev->config.cayman.max_tile_pipes = 8;
452,61 → 655,9
rdev->config.cayman.sc_prim_fifo_size = 0x100;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARUBA:
default:
rdev->config.cayman.max_shader_engines = 1;
rdev->config.cayman.max_pipes_per_simd = 4;
rdev->config.cayman.max_tile_pipes = 2;
if ((rdev->pdev->device == 0x9900) ||
(rdev->pdev->device == 0x9901) ||
(rdev->pdev->device == 0x9905) ||
(rdev->pdev->device == 0x9906) ||
(rdev->pdev->device == 0x9907) ||
(rdev->pdev->device == 0x9908) ||
(rdev->pdev->device == 0x9909) ||
(rdev->pdev->device == 0x9910) ||
(rdev->pdev->device == 0x9917)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
(rdev->pdev->device == 0x9904) ||
(rdev->pdev->device == 0x990A) ||
(rdev->pdev->device == 0x9913) ||
(rdev->pdev->device == 0x9918)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9919) ||
(rdev->pdev->device == 0x9990) ||
(rdev->pdev->device == 0x9991) ||
(rdev->pdev->device == 0x9994) ||
(rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
} else {
rdev->config.cayman.max_simds_per_se = 2;
rdev->config.cayman.max_backends_per_se = 1;
}
rdev->config.cayman.max_texture_channel_caches = 2;
rdev->config.cayman.max_gprs = 256;
rdev->config.cayman.max_threads = 256;
rdev->config.cayman.max_gs_threads = 32;
rdev->config.cayman.max_stack_entries = 512;
rdev->config.cayman.sx_num_of_sets = 8;
rdev->config.cayman.sx_max_export_size = 256;
rdev->config.cayman.sx_max_export_pos_size = 64;
rdev->config.cayman.sx_max_export_smx_size = 192;
rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sq_num_cf_insts = 2;
 
rdev->config.cayman.sc_prim_fifo_size = 0x40;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
break;
}
 
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x18) {
WREG32((0x2c14 + j), 0x00000000);
518,11 → 669,40
 
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
evergreen_fix_pci_max_read_req_size(rdev);
 
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
cgts_tcc_disable = 0xff000000;
gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
 
rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
rdev->config.cayman.backend_disable_mask_per_asic =
cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
rdev->config.cayman.num_shader_engines);
rdev->config.cayman.backend_map =
cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
rdev->config.cayman.num_backends_per_se *
rdev->config.cayman.num_shader_engines,
&rdev->config.cayman.backend_disable_mask_per_asic,
rdev->config.cayman.num_shader_engines);
tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
rdev->config.cayman.mem_max_burst_length_bytes = 512;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (rdev->config.cayman.mem_row_size_in_kb > 4)
532,6 → 712,73
rdev->config.cayman.num_gpus = 1;
rdev->config.cayman.multi_gpu_tile_size = 64;
 
//gb_addr_config = 0x02011003
#if 0
gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
gb_addr_config = 0;
switch (rdev->config.cayman.num_tile_pipes) {
case 1:
default:
gb_addr_config |= NUM_PIPES(0);
break;
case 2:
gb_addr_config |= NUM_PIPES(1);
break;
case 4:
gb_addr_config |= NUM_PIPES(2);
break;
case 8:
gb_addr_config |= NUM_PIPES(3);
break;
}
 
tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
switch (rdev->config.cayman.num_gpus) {
case 1:
default:
gb_addr_config |= NUM_GPUS(0);
break;
case 2:
gb_addr_config |= NUM_GPUS(1);
break;
case 4:
gb_addr_config |= NUM_GPUS(2);
break;
}
switch (rdev->config.cayman.multi_gpu_tile_size) {
case 16:
gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
break;
case 32:
default:
gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
break;
case 64:
gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
break;
case 128:
gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
break;
}
switch (rdev->config.cayman.mem_row_size_in_kb) {
case 1:
default:
gb_addr_config |= ROW_SIZE(0);
break;
case 2:
gb_addr_config |= ROW_SIZE(1);
break;
case 4:
gb_addr_config |= ROW_SIZE(2);
break;
}
#endif
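 
/* Decoding the golden value noted above (0x02011003) against the field
 * macros from evergreend.h, as a cross-check of this block:
 *   NUM_PIPES(3)               -> 8 tile pipes
 *   PIPE_INTERLEAVE_SIZE(0)    -> 256-byte interleave
 *   NUM_SHADER_ENGINES(1)      -> 2 shader engines
 *   SHADER_ENGINE_TILE_SIZE(1) -> tile size 32
 *   NUM_GPUS(0)                -> 1 GPU
 *   MULTI_GPU_TILE_SIZE(2)     -> 64
 *   ROW_SIZE(0)                -> 1 KB rows */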
 
tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
rdev->config.cayman.num_tile_pipes = (1 << tmp);
tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
545,7 → 792,17
tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
 
 
//gb_backend_map = 0x76541032;
#if 0
gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
gb_backend_map =
cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
rdev->config.cayman.num_backends_per_se *
rdev->config.cayman.num_shader_engines,
&rdev->config.cayman.backend_disable_mask_per_asic,
rdev->config.cayman.num_shader_engines);
#endif
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
569,61 → 826,34
rdev->config.cayman.tile_config |= (3 << 0);
break;
}
 
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
rdev->config.cayman.tile_config |= 1 << 4;
else {
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.cayman.tile_config |= 0 << 4;
break;
case 1: /* eight banks */
rdev->config.cayman.tile_config |= 1 << 4;
break;
case 2: /* sixteen banks */
default:
rdev->config.cayman.tile_config |= 2 << 4;
break;
}
}
rdev->config.cayman.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
rdev->config.cayman.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.cayman.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
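 
/* Resulting tile_config layout, per the shifts used above:
 *   bits  3:0  num_pipes
 *   bits  7:4  num_banks (0 = 4, 1 = 8, 2 = 16)
 *   bits 11:8  pipe interleave (group size)
 *   bits 15:12 row size */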
 
tmp = 0;
for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
u32 rb_disable_bitmap;
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
tmp <<= 4;
tmp |= rb_disable_bitmap;
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
rdev->config.cayman.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp,
rdev->config.cayman.max_backends_per_se *
rdev->config.cayman.max_shader_engines,
CAYMAN_MAX_BACKENDS, disabled_rb_mask);
WREG32(GB_BACKEND_MAP, tmp);
cayman_program_channel_remap(rdev);
 
cgts_tcc_disable = 0xffff0000;
for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
cgts_tcc_disable &= ~(1 << (16 + i));
/* primary versions */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
 
/* user versions */
WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
 
726,11 → 956,11
WREG32(VM_INVALIDATE_REQUEST, 1);
}
 
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
int i, r;
int r;
 
if (rdev->gart.robj == NULL) {
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
739,12 → 969,9
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
ENABLE_L1_TLB |
WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
764,41 → 991,19
WREG32(VM_CONTEXT0_CNTL2, 0);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
WREG32(0x15D4, 0);
WREG32(0x15D8, 0);
WREG32(0x15DC, 0);
 
/* empty context1-7 */
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application and setup and assigned
* on the fly in the vm part of radeon_gart.c
*/
for (i = 1; i < 8; i++) {
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
rdev->gart.table_addr >> 12);
}
 
/* enable context1-7 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
/* disable context1-7 */
WREG32(VM_CONTEXT1_CNTL2, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
cayman_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
static void cayman_pcie_gart_disable(struct radeon_device *rdev)
void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
int r;
 
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
814,82 → 1019,20
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
L2_CACHE_BIGK_FRAGMENT_SIZE(6));
radeon_gart_table_vram_unpin(rdev);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
}
 
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl)
{
u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
 
WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
WREG32(CP_INT_CNTL, cp_int_cntl);
}
 
/*
* CP.
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
}
 
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
 
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
 
if (ring->rptr_save_reg) {
uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
}
 
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(ring, ib->length_dw |
(ib->vm ? (ib->vm->id << 24) : 0));
 
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
}
 
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
929,26 → 1072,25
 
static int cayman_cp_start(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
 
r = radeon_ring_lock(rdev, ring, 7);
r = radeon_ring_lock(rdev, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(ring, 0x1);
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
 
cayman_cp_enable(rdev, true);
 
r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
r = radeon_ring_lock(rdev, cayman_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
955,38 → 1097,38
}
 
/* setup clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
for (i = 0; i < cayman_default_size; i++)
radeon_ring_write(ring, cayman_default_state[i]);
radeon_ring_write(rdev, cayman_default_state[i]);
 
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
/* set clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
 
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(ring, 0xc0026f00);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(rdev, 0xc0026f00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
 
/* Clear consts */
radeon_ring_write(ring, 0xc0036f00);
radeon_ring_write(ring, 0x00000bc4);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(rdev, 0xc0036f00);
radeon_ring_write(rdev, 0x00000bc4);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
 
radeon_ring_write(ring, 0xc0026900);
radeon_ring_write(ring, 0x00000316);
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
radeon_ring_write(rdev, 0xc0026900);
radeon_ring_write(rdev, 0x00000316);
radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(rdev, 0x00000010); /* */
 
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_unlock_commit(rdev);
 
/* XXX init other rings */
 
994,35 → 1136,12
}
 
 
static int cayman_cp_resume(struct radeon_device *rdev)
 
int cayman_cp_resume(struct radeon_device *rdev)
{
static const int ridx[] = {
RADEON_RING_TYPE_GFX_INDEX,
CAYMAN_RING_TYPE_CP1_INDEX,
CAYMAN_RING_TYPE_CP2_INDEX
};
static const unsigned cp_rb_cntl[] = {
CP_RB0_CNTL,
CP_RB1_CNTL,
CP_RB2_CNTL,
};
static const unsigned cp_rb_rptr_addr[] = {
CP_RB0_RPTR_ADDR,
CP_RB1_RPTR_ADDR,
CP_RB2_RPTR_ADDR
};
static const unsigned cp_rb_rptr_addr_hi[] = {
CP_RB0_RPTR_ADDR_HI,
CP_RB1_RPTR_ADDR_HI,
CP_RB2_RPTR_ADDR_HI
};
static const unsigned cp_rb_base[] = {
CP_RB0_BASE,
CP_RB1_BASE,
CP_RB2_BASE
};
struct radeon_ring *ring;
int i, r;
u32 tmp;
u32 rb_bufsz;
int r;
 
/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1036,8 → 1155,7
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
 
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
WREG32(CP_SEM_WAIT_TIMER, 0x4);
 
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
1044,59 → 1162,100
 
WREG32(CP_DEBUG, (1 << 27));
 
/* ring 0 - compute and gfx */
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB0_CNTL, tmp);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB0_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
if (rdev->wb.enabled)
WREG32(SCRATCH_UMSK, 0xff);
else {
tmp |= RB_NO_UPDATE;
WREG32(SCRATCH_UMSK, 0);
}
 
for (i = 0; i < 3; ++i) {
uint32_t rb_cntl;
uint64_t addr;
mdelay(1);
WREG32(CP_RB0_CNTL, tmp);
 
WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
 
rdev->cp.rptr = RREG32(CP_RB0_RPTR);
rdev->cp.wptr = RREG32(CP_RB0_WPTR);
 
/* ring1 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[ridx[i]];
rb_cntl = drm_order(ring->ring_size / 8);
rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
rb_cntl |= BUF_SWAP_32BIT;
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(cp_rb_cntl[i], rb_cntl);
WREG32(CP_RB1_CNTL, tmp);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB1_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
}
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
 
/* set the rb base addr, this causes an internal reset of ALL rings */
for (i = 0; i < 3; ++i) {
ring = &rdev->ring[ridx[i]];
WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
}
mdelay(1);
WREG32(CP_RB1_CNTL, tmp);
 
for (i = 0; i < 3; ++i) {
WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
 
rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
 
/* ring2 - compute only */
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB2_CNTL, tmp);
 
/* Initialize the ring buffer's read and write pointers */
ring = &rdev->ring[ridx[i]];
WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB2_WPTR, 0);
 
ring->rptr = ring->wptr = 0;
WREG32(ring->rptr_reg, ring->rptr);
WREG32(ring->wptr_reg, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
 
mdelay(1);
WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
}
WREG32(CP_RB2_CNTL, tmp);
 
WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
 
rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
 
/* start the rings */
cayman_cp_start(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
rdev->cp.ready = true;
rdev->cp1.ready = true;
rdev->cp2.ready = true;
/* this only tests cp0 */
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
r = radeon_ring_test(rdev);
if (r) {
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
rdev->cp.ready = false;
rdev->cp1.ready = false;
rdev->cp2.ready = false;
return r;
}
 
1103,6 → 1262,35
return 0;
}
 
bool cayman_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
int r;
 
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
/* XXX deal with CP0,1,2 */
rdev->cp.rptr = RREG32(CP_RB0_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
 
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
1120,23 → 1308,6
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14F8));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(0x14D8));
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14FC));
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(0x14DC));
 
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1167,7 → 1338,6
(void)RREG32(GRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
 
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
1176,14 → 1346,6
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
evergreen_mc_resume(rdev, &save);
return 0;
}
1195,21 → 1357,8
 
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* enable pcie gen2 link */
evergreen_pcie_gen2_enable(rdev);
 
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = ni_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
} else {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
if (r) {
1217,18 → 1366,12
return r;
}
}
 
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
1237,20 → 1380,11
 
r = evergreen_blit_init(rdev);
if (r) {
// r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
// evergreen_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
r = si_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
}
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
1265,9 → 1399,7
}
evergreen_irq_set(rdev);
 
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
return r;
r = cayman_cp_load_microcode(rdev);
1292,9 → 1424,12
*/
int cayman_init(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
1341,8 → 1476,8
if (r)
return r;
 
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
1357,15 → 1492,24
dev_err(rdev->dev, "disabling GPU acceleration\n");
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
 
/* Don't start up if the MC ucode is missing.
* The default clocks and voltages before the MC ucode
* is loaded are not sufficient for advanced operations.
*
* We can skip this check for TN, because there is no MC
* ucode.
*/
if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
if (!rdev->mc_fw) {
DRM_ERROR("radeon: MC ucode required for NI+.\n");
return -EINVAL;
}
1373,119 → 1517,3
return 0;
}
 
/*
* vm
*/
int cayman_vm_init(struct radeon_device *rdev)
{
/* number of VMs */
rdev->vm_manager.nvm = 8;
/* base offset of vram pages */
if (rdev->flags & RADEON_IS_IGP) {
u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
tmp <<= 22;
rdev->vm_manager.vram_base_offset = tmp;
} else
rdev->vm_manager.vram_base_offset = 0;
return 0;
}
 
void cayman_vm_fini(struct radeon_device *rdev)
{
}
 
#define R600_ENTRY_VALID (1 << 0)
#define R600_PTE_SYSTEM (1 << 1)
#define R600_PTE_SNOOPED (1 << 2)
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
 
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
{
uint32_t r600_flags = 0;
r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
if (flags & RADEON_VM_PAGE_SYSTEM) {
r600_flags |= R600_PTE_SYSTEM;
r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
}
return r600_flags;
}
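/* Illustration (editor's sketch, not driver code): converting the generic
 * flags of a snooped, writeable system page. Every name below comes from
 * the function and the flag definitions above. */
#if 0
uint32_t f = cayman_vm_page_flags(rdev,
				  RADEON_VM_PAGE_VALID |
				  RADEON_VM_PAGE_SYSTEM |
				  RADEON_VM_PAGE_SNOOPED |
				  RADEON_VM_PAGE_READABLE |
				  RADEON_VM_PAGE_WRITEABLE);
/* f == R600_ENTRY_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
 *      R600_PTE_READABLE | R600_PTE_WRITEABLE */
#endif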
 
/**
* cayman_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update the page tables using the CP (cayman-si).
*/
void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 
while (count) {
unsigned ndw = 1 + count * 2;
if (ndw > 0x3FFF)
ndw = 0x3FFF;
 
radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
radeon_ring_write(ring, pe);
radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
for (; ndw > 1; ndw -= 2, --count, pe += 8) {
uint64_t value = 0;
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
addr += incr;
 
} else if (flags & RADEON_VM_PAGE_VALID) {
value = addr;
addr += incr;
}
 
value |= r600_flags;
radeon_ring_write(ring, value);
radeon_ring_write(ring, upper_32_bits(value));
}
}
}
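/* Usage sketch (editor's illustration): write four consecutive 4 KiB
 * system-page entries starting at page-table address pe. The pe and addr
 * values are hypothetical; a real caller gets them from the VM manager. */
#if 0
uint64_t pe = 0, addr = 0;	/* hypothetical addresses from the VM manager */
cayman_vm_set_page(rdev, pe, addr, 4, RADEON_GPU_PAGE_SIZE,
		   RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM |
		   RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE);
#endif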
 
/**
* cayman_vm_flush - vm flush using the CP
*
* @rdev: radeon_device pointer
*
* Update the page table base and flush the VM TLB
* using the CP (cayman-si).
*/
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
if (vm == NULL)
return;
 
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* flush hdp cache */
radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
radeon_ring_write(ring, 0x1);
 
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
radeon_ring_write(ring, 1 << vm->id);
 
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
/drivers/video/drm/radeon/pci.c
1,6 → 1,5
 
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
7,10 → 6,11
#include <pci.h>
#include <syscall.h>
 
extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);
 
static LIST_HEAD(devices);
 
static pci_dev_t* pci_scan_device(u32_t bus, int devfn);
 
 
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
 
345,17 → 345,14
}
};
 
if( pci_scan_filter(id, busnr, devfn) == 0)
return NULL;
 
hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE);
 
dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0);
if(unlikely(dev == NULL))
return NULL;
 
INIT_LIST_HEAD(&dev->link);
 
if(unlikely(dev == NULL))
return NULL;
 
dev->pci_dev.busnr = busnr;
dev->pci_dev.devfn = devfn;
370,9 → 367,6
 
};
 
 
 
 
int pci_scan_slot(u32_t bus, int devfn)
{
int func, nr = 0;
411,6 → 405,49
return nr;
};
 
 
void pci_scan_bus(u32_t bus)
{
u32_t devfn;
pci_dev_t *dev;
 
 
for (devfn = 0; devfn < 0x100; devfn += 8)
pci_scan_slot(bus, devfn);
 
}
 
int enum_pci_devices()
{
pci_dev_t *dev;
u32_t last_bus;
u32_t bus = 0 , devfn = 0;
 
// list_initialize(&devices);
 
last_bus = PciApi(1);
 
 
if( unlikely(last_bus == -1))
return -1;
 
for(;bus <= last_bus; bus++)
pci_scan_bus(bus);
 
// for(dev = (dev_t*)devices.next;
// &dev->link != &devices;
// dev = (dev_t*)dev->link.next)
// {
// dbgprintf("PCI device %x:%x bus:%x devfn:%x\n",
// dev->pci_dev.vendor,
// dev->pci_dev.device,
// dev->pci_dev.bus,
// dev->pci_dev.devfn);
//
// }
return 0;
}
 
#define PCI_FIND_CAP_TTL 48
 
static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn,
476,364 → 513,317
}
 
 
 
 
int enum_pci_devices()
#if 0
/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to be suspended
* @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
*
* Transition a device to a new power state, using the Power Management
* Capabilities in the device's config space.
*
* RETURN VALUE:
* -EINVAL if trying to enter a lower state than we're already in.
* 0 if we're already in the requested state.
* -EIO if device does not support PCI PM.
* 0 if we can successfully change the power state.
*/
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
pci_dev_t *dev;
u32_t last_bus;
u32_t bus = 0 , devfn = 0;
int pm, need_restore = 0;
u16 pmcsr, pmc;
 
/* bound the state we're entering */
if (state > PCI_D3hot)
state = PCI_D3hot;
 
last_bus = PciApi(1);
/*
* If the device or the parent bridge can't support PCI PM, ignore
* the request if we're doing anything besides putting it into D0
* (which would only happen on boot).
*/
if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
return 0;
 
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 
if( unlikely(last_bus == -1))
return -1;
/* abort if the device doesn't support PM capabilities */
if (!pm)
return -EIO;
 
for(;bus <= last_bus; bus++)
{
for (devfn = 0; devfn < 0x100; devfn += 8)
pci_scan_slot(bus, devfn);
/* Validate current state:
* Can enter D0 from any state, but we can only go deeper
* to sleep if we're already in a low power state
*/
if (state != PCI_D0 && dev->current_state > state) {
printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
__FUNCTION__, pci_name(dev), state, dev->current_state);
return -EINVAL;
} else if (dev->current_state == state)
return 0; /* we're already there */
 
 
pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
printk(KERN_DEBUG
"PCI: %s has unsupported PM cap regs version (%u)\n",
pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
return -EIO;
}
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
dbgprintf("PCI device %x:%x bus:%x devfn:%x\n",
dev->pci_dev.vendor,
dev->pci_dev.device,
dev->pci_dev.busnr,
dev->pci_dev.devfn);
 
/* check if this device supports the desired state */
if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
return -EIO;
else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
return -EIO;
 
pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
 
/* If we're (effectively) in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and
* sets PowerState to 0.
*/
switch (dev->current_state) {
case PCI_D0:
case PCI_D1:
case PCI_D2:
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = 1;
/* Fall-through: force to D0 */
default:
pmcsr = 0;
break;
}
return 0;
}
 
const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist)
{
pci_dev_t *dev;
const struct pci_device_id *ent;
/* enter specified state */
pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);
 
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( dev->pci_dev.vendor != idlist->vendor )
continue;
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
msleep(pci_pm_d3_delay);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(200);
 
for(ent = idlist; ent->vendor != 0; ent++)
{
if(unlikely(ent->device == dev->pci_dev.device))
{
pdev->pci_dev = dev->pci_dev;
return ent;
}
};
}
/*
* Give firmware a chance to be called, such as ACPI _PRx, _PSx
* Should the firmware method run after the native method?
*/
if (platform_pci_set_power_state)
platform_pci_set_power_state(dev, state);
 
return NULL;
};
dev->current_state = state;
 
struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
{
pci_dev_t *dev;
/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
* INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
* from D3hot to D0 _may_ perform an internal reset, thereby
* going to "D0 Uninitialized" rather than "D0 Initialized".
* For example, at least some versions of the 3c905B and the
* 3c556B exhibit this behaviour.
*
* At least some laptop BIOSen (e.g. the Thinkpad T21) leave
* devices in a D3hot state at boot. Consequently, we need to
* restore at least the BARs so that the device will be
* accessible to its driver.
*/
if (need_restore)
pci_restore_bars(dev);
 
dev = (pci_dev_t*)devices.next;
return 0;
}
#endif
 
if(from != NULL)
int pcibios_enable_resources(struct pci_dev *dev, int mask)
{
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( &dev->pci_dev == from)
{
dev = (pci_dev_t*)dev->link.next;
break;
};
}
};
u16_t cmd, old_cmd;
int idx;
struct resource *r;
 
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
cmd = PciRead16(dev->busnr, dev->devfn, PCI_COMMAND);
old_cmd = cmd;
for (idx = 0; idx < PCI_NUM_RESOURCES; idx++)
{
if( dev->pci_dev.vendor != vendor )
/* Only set up the requested stuff */
if (!(mask & (1 << idx)))
continue;
 
if(dev->pci_dev.device == device)
{
return &dev->pci_dev;
r = &dev->resource[idx];
if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
if ((idx == PCI_ROM_RESOURCE) &&
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available "
"because of resource %d collisions\n",
pci_name(dev), idx);
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
return NULL;
};
 
 
struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
{
pci_dev_t *dev;
 
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn)
return &dev->pci_dev;
if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd);
PciWrite16(dev->busnr, dev->devfn, PCI_COMMAND, cmd);
}
return NULL;
return 0;
}
 
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
{
pci_dev_t *dev;
 
dev = (pci_dev_t*)devices.next;
 
if(from != NULL)
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( &dev->pci_dev == from)
{
dev = (pci_dev_t*)dev->link.next;
break;
};
}
};
int err;
 
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( dev->pci_dev.class == class)
{
return &dev->pci_dev;
}
}
if ((err = pcibios_enable_resources(dev, mask)) < 0)
return err;
 
return NULL;
// if (!dev->msi_enabled)
// return pcibios_enable_irq(dev);
return 0;
}
 
 
#define PIO_OFFSET 0x10000UL
#define PIO_MASK 0x0ffffUL
#define PIO_RESERVED 0x40000UL
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
 
#define IO_COND(addr, is_pio, is_mmio) do { \
unsigned long port = (unsigned long __force)addr; \
if (port >= PIO_RESERVED) { \
is_mmio; \
} else if (port > PIO_OFFSET) { \
port &= PIO_MASK; \
is_pio; \
}; \
} while (0)
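/* Editor's note on the cookie convention encoded by IO_COND: a cookie at or
 * above PIO_RESERVED (0x40000) is treated as an MMIO virtual address, while
 * a cookie in (PIO_OFFSET, PIO_RESERVED) carries an I/O port number in its
 * low 16 bits (port & PIO_MASK); e.g. cookie 0x10378 selects port 0x0378. */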
// err = pci_set_power_state(dev, PCI_D0);
// if (err < 0 && err != -EIO)
// return err;
err = pcibios_enable_device(dev, bars);
// if (err < 0)
// return err;
// pci_fixup_device(pci_fixup_enable, dev);
 
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
return (void __iomem *) port;
return 0;
}
 
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
resource_size_t start = pci_resource_start(dev, bar);
resource_size_t len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
 
if (!len || !start)
return NULL;
if (maxlen && len > maxlen)
len = maxlen;
if (flags & IORESOURCE_IO)
return ioport_map(start, len);
if (flags & IORESOURCE_MEM) {
return ioremap(start, len);
}
/* What? */
return NULL;
}
 
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
static int __pci_enable_device_flags(struct pci_dev *dev,
resource_size_t flags)
{
IO_COND(addr, /* nothing */, iounmap(addr));
}
int err;
int i, bars = 0;
 
// if (atomic_add_return(1, &dev->enable_cnt) > 1)
// return 0; /* already enabled */
 
struct pci_bus_region {
resource_size_t start;
resource_size_t end;
};
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
 
static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
region->start = res->start;
region->end = res->end;
err = do_pci_enable_device(dev, bars);
// if (err < 0)
// atomic_dec(&dev->enable_cnt);
return err;
}
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_dword(struct pci_dev *dev, int where,
u32 val)
/**
* pci_enable_device - Initialize device before it's used by a driver.
* @dev: PCI device to be initialized
*
* Initialize device before it's used by a driver. Ask low-level code
* to enable I/O and memory. Wake up the device if it was suspended.
* Beware, this function can fail.
*
* Note we don't actually enable the device many times if we call
* this function repeatedly (we just increment the count).
*/
int pci_enable_device(struct pci_dev *dev)
{
PciWrite32(dev->busnr, dev->devfn, where, val);
return 1;
return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
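/* Usage sketch (editor's illustration): the usual bring-up order in a probe
 * path, assuming pdev was obtained earlier, e.g. via pci_get_device(). */
#if 0
if (pci_enable_device(pdev) == 0)
	pci_set_master(pdev);
#endif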
 
static inline int pci_read_config_word(struct pci_dev *dev, int where,
u16 *val)
{
*val = PciRead16(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_word(struct pci_dev *dev, int where,
u16 val)
 
struct pci_device_id* find_pci_device(pci_dev_t* pdev, struct pci_device_id *idlist)
{
PciWrite16(dev->busnr, dev->devfn, where, val);
return 1;
}
pci_dev_t *dev;
struct pci_device_id *ent;
 
 
int pci_enable_rom(struct pci_dev *pdev)
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
struct pci_bus_region region;
u32 rom_addr;
if( dev->pci_dev.vendor != idlist->vendor )
continue;
 
if (!res->flags)
return -1;
 
pcibios_resource_to_bus(pdev, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
return 0;
}
 
void pci_disable_rom(struct pci_dev *pdev)
for(ent = idlist; ent->vendor != 0; ent++)
{
u32 rom_addr;
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
if(unlikely(ent->device == dev->pci_dev.device))
{
pdev->pci_dev = dev->pci_dev;
return ent;
}
};
}
 
/**
* pci_get_rom_size - obtain the actual size of the ROM image
* @pdev: target PCI device
* @rom: kernel virtual pointer to image of ROM
* @size: size of PCI window
* return: size of actual ROM image
*
* Determine the actual length of the ROM image.
* The PCI window size could be much larger than the
* actual image size.
*/
size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
{
void __iomem *image;
int last_image;
return NULL;
};
 
image = rom;
do {
void __iomem *pds;
/* Standard PCI ROMs start out with these bytes 55 AA */
if (readb(image) != 0x55) {
dev_err(&pdev->dev, "Invalid ROM contents\n");
break;
}
if (readb(image + 1) != 0xAA)
break;
/* get the PCI data structure and check its signature */
pds = image + readw(image + 24);
if (readb(pds) != 'P')
break;
if (readb(pds + 1) != 'C')
break;
if (readb(pds + 2) != 'I')
break;
if (readb(pds + 3) != 'R')
break;
last_image = readb(pds + 21) & 0x80;
/* this length is reliable */
image += readw(pds + 16) * 512;
} while (!last_image);
 
/* never return a size larger than the PCI resource window */
/* there are known ROMs that get the size wrong */
return min((size_t)(image - rom), size);
}
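/* Editor's note: each image in a standard PCI ROM begins with the 0x55 0xAA
 * signature; the word at image offset 24 points to the "PCIR" data structure,
 * whose byte 21 bit 7 flags the last image and whose word at offset 16 gives
 * the image length in 512-byte units. The loop above walks that chain, sums
 * the per-image lengths, and clamps the result to the resource window. */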
 
 
/**
* pci_map_rom - map a PCI ROM to kernel space
* @pdev: pointer to pci device struct
* @size: pointer to receive size of pci window over ROM
* @return: kernel virtual pointer to image of ROM
*
* Return: kernel virtual pointer to image of ROM
*
* Map a PCI ROM into kernel space. If ROM is boot video ROM,
* the shadow BIOS copy will be returned instead of the
* actual ROM.
*/
void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
 
#define legacyBIOSLocation 0xC0000
#define OS_BASE 0x80000000
 
void *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
loff_t start;
void __iomem *rom;
u32_t start;
void *rom;
 
// ENTER();
 
// dbgprintf("resource start %x end %x flags %x\n",
// res->start, res->end, res->flags);
#if 0
/*
* IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
* memory map if the VGA enable bit of the Bridge Control register is
* set for embedded VGA.
*/
 
start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
 
#if 0
 
if (res->flags & IORESOURCE_ROM_SHADOW) {
/* primary video rom always starts here */
start = (loff_t)0xC0000;
start = (u32_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
} else {
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
return (void __iomem *)(unsigned long)
return (void *)(unsigned long)
pci_resource_start(pdev, PCI_ROM_RESOURCE);
} else {
/* assign the ROM an address if it doesn't have one */
// if (res->parent == NULL &&
// pci_assign_resource(pdev,PCI_ROM_RESOURCE))
// return NULL;
start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
if (*size == 0)
return NULL;
// start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
// *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
// if (*size == 0)
// return NULL;
 
/* Enable ROM space decodes */
// if (pci_enable_rom(pdev))
// return NULL;
if (pci_enable_rom(pdev))
return NULL;
}
}
#endif
 
rom = ioremap(start, *size);
if (!rom) {
850,237 → 840,37
* size is much larger than the actual size of the ROM.
* True size is important if the ROM is going to be copied.
*/
*size = pci_get_rom_size(pdev, rom, *size);
// LEAVE();
return rom;
}
*size = pci_get_rom_size(rom, *size);
 
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
#endif
 
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
return;
unsigned char tmp[32];
rom = NULL;
 
iounmap(rom);
 
/* Disable again before continuing, leave enabled if pci=rom */
if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
pci_disable_rom(pdev);
}
 
int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
dbgprintf("Getting BIOS copy from legacy VBIOS location\n");
memcpy(tmp,(char*)(OS_BASE+legacyBIOSLocation), 32);
*size = tmp[2] * 512;
if (*size > 0x10000 )
{
dev->dma_mask = mask;
 
return 0;
*size = 0;
dbgprintf("Invalid BIOS length field\n");
}
 
 
 
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
 
pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
if (enable)
cmd = old_cmd | PCI_COMMAND_MASTER;
else
cmd = old_cmd & ~PCI_COMMAND_MASTER;
if (cmd != old_cmd) {
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
dev->is_busmaster = enable;
}
rom = (void*)( OS_BASE+legacyBIOSLocation);
 
 
/* pci_set_master - enables bus-mastering for device dev
* @dev: the PCI device to enable
*
* Enables bus-mastering on the device and calls pcibios_set_master()
* to do the needed arch specific settings.
*/
void pci_set_master(struct pci_dev *dev)
{
__pci_set_master(dev, true);
// pcibios_set_master(dev);
return rom;
}
 
/**
* pci_clear_master - disables bus-mastering for device dev
* @dev: the PCI device to disable
*/
void pci_clear_master(struct pci_dev *dev)
{
__pci_set_master(dev, false);
}
 
 
static inline int pcie_cap_version(const struct pci_dev *dev)
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
}
// if (!pci_dma_supported(dev, mask))
// return -EIO;
 
static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
{
return true;
}
dev->dma_mask = mask;
 
static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_ENDPOINT ||
type == PCI_EXP_TYPE_LEG_END;
}
 
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
(type == PCI_EXP_TYPE_DOWNSTREAM &&
dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
}
 
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_RC_EC;
}
 
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
if (!pci_is_pcie(dev))
return false;
 
switch (pos) {
case PCI_EXP_FLAGS_TYPE:
return true;
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_DEVSTA:
return pcie_cap_has_devctl(dev);
case PCI_EXP_LNKCAP:
case PCI_EXP_LNKCTL:
case PCI_EXP_LNKSTA:
return pcie_cap_has_lnkctl(dev);
case PCI_EXP_SLTCAP:
case PCI_EXP_SLTCTL:
case PCI_EXP_SLTSTA:
return pcie_cap_has_sltctl(dev);
case PCI_EXP_RTCTL:
case PCI_EXP_RTCAP:
case PCI_EXP_RTSTA:
return pcie_cap_has_rtctl(dev);
case PCI_EXP_DEVCAP2:
case PCI_EXP_DEVCTL2:
case PCI_EXP_LNKCAP2:
case PCI_EXP_LNKCTL2:
case PCI_EXP_LNKSTA2:
return pcie_cap_version(dev) > 1;
default:
return false;
}
}
 
/*
* Note that these accessor functions are only for the "PCI Express
* Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
* other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
*/
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
int ret;
 
*val = 0;
if (pos & 1)
return -EINVAL;
 
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
/*
* Reset *val to 0 if pci_read_config_word() fails; it may
* have been left as 0xFFFF if a hardware error occurred
* during the config read.
*/
if (ret)
*val = 0;
return ret;
}
 
/*
* For Functions that do not implement the Slot Capabilities,
* Slot Status, and Slot Control registers, these spaces must
* be hardwired to 0b, with the exception of the Presence Detect
* State bit in the Slot Status register of Downstream Ports,
* which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
*/
if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
*val = PCI_EXP_SLTSTA_PDS;
}
 
return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
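/* Usage sketch (editor's illustration; pdev is assumed to come from a probe
 * path): read the negotiated link status. PCI_EXP_LNKSTA is only accessed
 * when pcie_capability_reg_implemented() says the register exists. */
#if 0
u16 lnksta = 0;
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
dbgprintf("PCIe link status: 0x%04x\n", lnksta);
#endif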
 
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
int ret;
 
*val = 0;
if (pos & 3)
return -EINVAL;
 
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
/*
* Reset *val to 0 if pci_read_config_dword() fails; it may
* have been left as 0xFFFFFFFF if a hardware error occurred
* during the config read.
*/
if (ret)
*val = 0;
return ret;
}
 
if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
*val = PCI_EXP_SLTSTA_PDS;
}
 
return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);
 
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
if (pos & 1)
return -EINVAL;
 
if (!pcie_capability_reg_implemented(dev, pos))
return 0;
 
return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);
 
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
if (pos & 3)
return -EINVAL;
 
if (!pcie_capability_reg_implemented(dev, pos))
return 0;
 
return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);
 
/drivers/video/drm/radeon/radeon_asic.c
40,16 → 40,6
/*
* Registers accessors functions.
*/
/**
* radeon_invalid_rreg - dummy reg read function
*
* @rdev: radeon device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
* that certain asics don't have (all asics).
* Returns the value in the register.
*/
static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
57,16 → 47,6
return 0;
}
 
/**
* radeon_invalid_wreg - dummy reg write function
*
* @rdev: radeon device pointer
* @reg: offset of register
* @v: value to write to the register
*
* Dummy register read function. Used for register blocks
* that certain asics don't have (all asics).
*/
static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
74,14 → 54,6
BUG_ON(1);
}
 
/**
* radeon_register_accessor_init - sets up the register accessor callbacks
*
* @rdev: radeon device pointer
*
* Sets up the register accessor callbacks for various register
* apertures. Not all asics have all apertures (all asics).
*/
static void radeon_register_accessor_init(struct radeon_device *rdev)
{
rdev->mc_rreg = &radeon_invalid_rreg;
130,14 → 102,6
 
 
/* helper to disable agp */
/**
* radeon_agp_disable - AGP disable helper function
*
* @rdev: radeon device pointer
*
* Removes AGP flags and changes the gart callbacks on AGP
* cards when using the internal gart rather than AGP (all asics).
*/
void radeon_agp_disable(struct radeon_device *rdev)
{
rdev->flags &= ~RADEON_IS_AGP;
150,13 → 114,13
rdev->family == CHIP_R423) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
170,74 → 134,38
// .suspend = &r100_suspend,
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r100_cs_parse,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
// .cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = NULL,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic r200_asic = {
246,74 → 174,37
// .suspend = &r100_suspend,
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r100_cs_parse,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
// .cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic r300_asic = {
323,73 → 214,36
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic r300_asic_pcie = {
399,73 → 253,35
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic r420_asic = {
475,73 → 291,36
// .resume = &r420_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic rs400_asic = {
551,73 → 330,36
// .resume = &rs400_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.set_page = &rs400_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic rs600_asic = {
627,73 → 369,36
// .resume = &rs600_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs600_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
// .sense = &rs600_hpd_sense,
// .set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic rs690_asic = {
703,73 → 408,36
// .resume = &rs690_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.set_page = &rs400_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.get_vblank_counter = &rs600_get_vblank_counter,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r200_copy_dma,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r200_copy_dma,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic rv515_asic = {
779,73 → 447,36
// .resume = &rv515_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.get_vblank_counter = &rs600_get_vblank_counter,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
// .sense = &rs600_hpd_sense,
// .set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic r520_asic = {
855,73 → 486,36
// .resume = &r520_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
// .sense = &rs600_hpd_sense,
// .set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
};
 
static struct radeon_asic r600_asic = {
929,74 → 523,35
// .fini = &r600_fini,
// .suspend = &r600_suspend,
// .resume = &r600_resume,
// .vga_set_state = &r600_vga_set_state,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
},
.display = {
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &r600_hpd_init,
// .fini = &r600_hpd_fini,
// .sense = &r600_hpd_sense,
// .set_polarity = &r600_hpd_set_polarity,
},
.pm = {
// .misc = &r600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r600_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &r600_get_pcie_lanes,
// .set_pcie_lanes = &r600_set_pcie_lanes,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
};
 
static struct radeon_asic rs780_asic = {
1004,74 → 559,35
// .fini = &r600_fini,
// .suspend = &r600_suspend,
// .resume = &r600_resume,
// .vga_set_state = &r600_vga_set_state,
.cp_commit = &r600_cp_commit,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
},
.display = {
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &r600_hpd_init,
// .fini = &r600_hpd_fini,
// .sense = &r600_hpd_sense,
// .set_polarity = &r600_hpd_set_polarity,
},
.pm = {
// .misc = &r600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &rs780_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = NULL,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
};
 
static struct radeon_asic rv770_asic = {
1079,74 → 595,34
// .fini = &rv770_fini,
// .suspend = &rv770_suspend,
// .resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &r600_asic_reset,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
},
.display = {
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &r600_hpd_init,
// .fini = &r600_hpd_fini,
// .sense = &r600_hpd_sense,
// .set_polarity = &r600_hpd_set_polarity,
},
.pm = {
// .misc = &rv770_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r600_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &r600_get_pcie_lanes,
// .set_pcie_lanes = &r600_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rv770_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
};
 
static struct radeon_asic evergreen_asic = {
1154,74 → 630,35
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &evergreen_asic_reset,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.copy_blit = &evergreen_copy_blit,
.copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &r600_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &r600_get_pcie_lanes,
// .set_pcie_lanes = &r600_set_pcie_lanes,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
};
 
static struct radeon_asic sumo_asic = {
1229,74 → 666,34
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &evergreen_asic_reset,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
},
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
.copy_blit = &evergreen_copy_blit,
.copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &sumo_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = NULL,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
};
 
static struct radeon_asic btc_asic = {
1304,401 → 701,71
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &evergreen_asic_reset,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.copy_blit = &evergreen_copy_blit,
.copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &btc_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
};
 
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
// .fini = &cayman_fini,
// .suspend = &cayman_suspend,
// .resume = &cayman_resume,
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &cayman_asic_reset,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.copy_blit = &evergreen_copy_blit,
.copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &evergreen_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &btc_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
};
 
static struct radeon_asic trinity_asic = {
.init = &cayman_init,
// .fini = &cayman_fini,
// .suspend = &cayman_suspend,
// .resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &sumo_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = NULL,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
static struct radeon_asic si_asic = {
.init = &si_init,
// .fini = &si_fini,
// .suspend = &si_suspend,
// .resume = &si_resume,
.asic_reset = &si_asic_reset,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &si_ring_ib_execute,
// .ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &si_ring_ib_execute,
// .ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &si_ring_ib_execute,
// .ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
}
},
.irq = {
.set = &si_irq_set,
.process = &si_irq_process,
},
.display = {
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = NULL,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = NULL,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &sumo_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
/**
* radeon_asic_init - register asic-specific callbacks
*
* @rdev: radeon device pointer
*
* Registers the appropriate asic-specific callbacks for each
* chip family. Also sets other asic-specific info like the number
* of crtcs and the register aperture accessors (all asics); an
* illustrative dispatch sketch follows this function.
* Returns 0 on success.
*/
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
1738,11 → 805,10
rdev->asic = &r420_asic;
/* handle macs */
if (rdev->bios == NULL) {
rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
rdev->asic->pm.set_memory_clock = NULL;
rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
rdev->asic->set_memory_clock = NULL;
}
break;
case CHIP_RS400:
1816,18 → 882,6
/* set num crtcs */
rdev->num_crtc = 6;
break;
case CHIP_ARUBA:
rdev->asic = &trinity_asic;
/* set num crtcs */
rdev->num_crtc = 4;
break;
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
rdev->asic = &si_asic;
/* set num crtcs */
rdev->num_crtc = 6;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
1834,8 → 888,8
}
 
if (rdev->flags & RADEON_IS_IGP) {
rdev->asic->pm.get_memory_clock = NULL;
rdev->asic->pm.set_memory_clock = NULL;
rdev->asic->get_memory_clock = NULL;
rdev->asic->set_memory_clock = NULL;
}
 
return 0;
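 
A minimal sketch of how the rest of the driver dispatches through the table that radeon_asic_init() installs; the real wrappers are equivalent one-liners (macros or static inlines) in radeon_asic.h, so the wrapper names below are illustrative only:
 
	/* Hypothetical wrappers around the callback table set up above. */
	static inline int radeon_do_asic_reset(struct radeon_device *rdev)
	{
		return rdev->asic->asic_reset(rdev);	/* e.g. &evergreen_asic_reset */
	}
 
	static inline bool radeon_do_gui_idle(struct radeon_device *rdev)
	{
		return rdev->asic->gui_idle(rdev);	/* e.g. &r600_gui_idle */
	}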
/drivers/video/drm/radeon/radeon_clocks.c
25,8 → 25,8
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
219,9 → 219,6
} else {
DRM_INFO("Using generic clock info\n");
 
/* may need to be per card */
rdev->clock.max_pixel_clock = 35000;
 
if (rdev->flags & RADEON_IS_IGP) {
p1pll->reference_freq = 1432;
p2pll->reference_freq = 1432;
334,7 → 331,7
 
if (!rdev->clock.default_sclk)
rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
 
rdev->pm.current_sclk = rdev->clock.default_sclk;
633,7 → 630,7
tmp &= ~(R300_SCLK_FORCE_VAP);
tmp |= RADEON_SCLK_FORCE_CP;
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
mdelay(15);
udelay(15000);
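 
The mdelay(15)/udelay(15000) pairs throughout this hunk request the same 15 ms pause; mdelay() is preferred for multi-millisecond busy-waits because large udelay() arguments can overflow the delay-loop arithmetic on some architectures. A hedged sketch of the conventional relationship between the two primitives (my_mdelay is hypothetical, not the <linux/delay.h> definition, though the generic fallback there is an equivalent loop):
 
	#define my_mdelay(ms)					\
		do {						\
			unsigned long __ms = (ms);		\
			while (__ms--)				\
				udelay(1000);			\
		} while (0)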
 
tmp = RREG32_PLL(R300_SCLK_CNTL2);
tmp &= ~(R300_SCLK_FORCE_TCL |
651,12 → 648,12
tmp |= (RADEON_ENGIN_DYNCLK_MODE |
(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
mdelay(15);
udelay(15000);
 
tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
tmp |= RADEON_SCLK_DYN_START_CNTL;
WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
mdelay(15);
udelay(15000);
 
/* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
to lock up randomly; leave them as set by the BIOS.
696,7 → 693,7
tmp |= RADEON_SCLK_MORE_FORCEON;
}
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
mdelay(15);
udelay(15000);
}
 
/* RV200::A11 A12, RV250::A11 A12 */
709,7 → 706,7
tmp |= RADEON_TCL_BYPASS_DISABLE;
WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
}
mdelay(15);
udelay(15000);
 
/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
722,7 → 719,7
RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
mdelay(15);
udelay(15000);
 
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
729,7 → 726,7
RADEON_PIXCLK_DAC_ALWAYS_ONb);
 
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
mdelay(15);
udelay(15000);
}
} else {
/* Turn everything OFF (ForceON to everything) */
861,7 → 858,7
}
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
 
mdelay(16);
udelay(16000);
 
if ((rdev->family == CHIP_R300) ||
(rdev->family == CHIP_R350)) {
870,7 → 867,7
R300_SCLK_FORCE_GA |
R300_SCLK_FORCE_CBA);
WREG32_PLL(R300_SCLK_CNTL2, tmp);
mdelay(16);
udelay(16000);
}
 
if (rdev->flags & RADEON_IS_IGP) {
878,7 → 875,7
tmp &= ~(RADEON_FORCEON_MCLKA |
RADEON_FORCEON_YCLKA);
WREG32_PLL(RADEON_MCLK_CNTL, tmp);
mdelay(16);
udelay(16000);
}
 
if ((rdev->family == CHIP_RV200) ||
887,7 → 884,7
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
tmp |= RADEON_SCLK_MORE_FORCEON;
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
mdelay(16);
udelay(16000);
}
 
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
900,7 → 897,7
RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
mdelay(16);
udelay(16000);
 
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
/drivers/video/drm/radeon/radeon_combios.c
24,8 → 24,8
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
 
620,8 → 620,8
i2c.y_data_mask = 0x80;
} else {
/* default masks for ddc pads */
i2c.mask_clk_mask = RADEON_GPIO_MASK_1;
i2c.mask_data_mask = RADEON_GPIO_MASK_0;
i2c.mask_clk_mask = RADEON_GPIO_EN_1;
i2c.mask_data_mask = RADEON_GPIO_EN_0;
i2c.a_clk_mask = RADEON_GPIO_A_1;
i2c.a_data_mask = RADEON_GPIO_A_0;
i2c.en_clk_mask = RADEON_GPIO_EN_1;
719,34 → 719,6
return i2c;
}
 
static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct radeon_i2c_bus_rec i2c;
u16 offset;
u8 id, blocks, clk, data;
int i;
 
i2c.valid = false;
 
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
if (offset) {
blocks = RBIOS8(offset + 2);
for (i = 0; i < blocks; i++) {
id = RBIOS8(offset + 3 + (i * 5) + 0);
if (id == 136) {
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
/* gpiopad */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
(1 << clk), (1 << data));
break;
}
}
}
return i2c;
}
 
void radeon_combios_i2c_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
783,14 → 755,30
} else if (rdev->family == CHIP_RS300 ||
rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) {
u16 offset;
u8 id, blocks, clk, data;
int i;
 
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
 
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
if (offset) {
blocks = RBIOS8(offset + 2);
for (i = 0; i < blocks; i++) {
id = RBIOS8(offset + 3 + (i * 5) + 0);
if (id == 136) {
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
/* gpiopad */
i2c = radeon_combios_get_i2c_info_from_table(rdev);
if (i2c.valid)
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
(1 << clk), (1 << data));
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
break;
}
}
}
} else if ((rdev->family == CHIP_R200) ||
(rdev->family >= CHIP_R300)) {
/* 0x68 */
1573,11 → 1561,6
(rdev->pdev->subsystem_device == 0x4150)) {
/* Mac G5 tower 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
} else if ((rdev->pdev->device == 0x4c66) &&
(rdev->pdev->subsystem_vendor == 0x1002) &&
(rdev->pdev->subsystem_device == 0x4c66)) {
/* SAM440ep RV250 embedded board */
rdev->mode_info.connector_table = CT_SAM440EP;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
2151,67 → 2134,6
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
case CT_SAM440EP:
DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
rdev->mode_info.connector_table);
/* LVDS */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* DVI-I - secondary dac, int tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
&hpd);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 2,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
CONNECTOR_OBJECT_ID_VGA,
&hpd);
/* TV - TV DAC */
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
2333,9 → 2255,6
connector = (tmp >> 12) & 0xf;
 
ddc_type = (tmp >> 8) & 0xf;
if (ddc_type == 5)
ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
else
ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
 
switch (connector) {
2644,18 → 2563,15
 
/* allocate 2 power states */
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
if (rdev->pm.power_state) {
/* allocate 1 clock mode per state */
rdev->pm.power_state[0].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
rdev->pm.power_state[1].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
if (!rdev->pm.power_state[0].clock_info ||
!rdev->pm.power_state[1].clock_info)
goto pm_failed;
} else
goto pm_failed;
if (!rdev->pm.power_state) {
rdev->pm.default_power_state_index = state_index;
rdev->pm.num_power_states = 0;
 
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
return;
}
 
/* check for a thermal chip */
offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
if (offset) {
2701,26 → 2617,7
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
} else {
/* boards with a thermal chip, but no overdrive table */
 
/* Asus 9600xt has an f75375 on the monid bus */
if ((dev->pdev->device == 0x4152) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
(dev->pdev->subsystem_device == 0xc002)) {
i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = "f75375";
info.addr = 0x28;
strlcpy(info.type, name, sizeof(info.type));
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
name, info.addr);
}
}
}
 
if (rdev->flags & RADEON_IS_MOBILITY) {
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
2819,14 → 2716,6
 
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
return;
 
pm_failed:
rdev->pm.default_power_state_index = state_index;
rdev->pm.num_power_states = 0;
 
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
}
 
void radeon_external_tmds_setup(struct drm_encoder *encoder)
2926,7 → 2815,7
case 4:
val = RBIOS16(index);
index += 2;
mdelay(val);
udelay(val * 1000);
break;
case 6:
slave_addr = id & 0xff;
3125,7 → 3014,7
udelay(150);
break;
case 2:
mdelay(1);
udelay(1000);
break;
case 3:
while (tmp--) {
3156,13 → 3045,13
/*mclk_cntl |= 0x00001111;*//* ??? */
WREG32_PLL(RADEON_MCLK_CNTL,
mclk_cntl);
mdelay(10);
udelay(10000);
#endif
WREG32_PLL
(RADEON_CLK_PWRMGT_CNTL,
tmp &
~RADEON_CG_NO1_DEBUG_0);
mdelay(10);
udelay(10000);
}
break;
default:
3319,6 → 3208,15
WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
}
 
void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
{
uint16_t dyn_clk_info =
combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
 
if (dyn_clk_info)
combios_parse_pll_table(dev, dyn_clk_info);
}
 
void radeon_combios_asic_init(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
3381,14 → 3279,6
rdev->pdev->subsystem_device == 0x30a4)
return;
 
/* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
* - it hangs on resume inside the dynclk 1 table.
*/
if (rdev->family == CHIP_RS480 &&
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x30ae)
return;
 
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
/drivers/video/drm/radeon/radeon_connectors.c
23,11 → 23,11
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
 
40,6 → 40,12
struct drm_encoder *encoder,
bool connected);
 
extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
 
bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
 
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
54,41 → 60,20
 
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
/* if the connector is already off, don't turn it back on */
if (connector->dpms != DRM_MODE_DPMS_ON)
/* powering up/down the eDP panel generates hpd events which
* can interfere with modesetting.
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
return;
 
/* just deal with DP (not eDP) here. */
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
/* if existing sink type was not DP no need to retrain */
if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
return;
 
/* first get sink type as it may be reset after (un)plug */
dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
int saved_dpms = connector->dpms;
/* Only turn off the display if it's physically disconnected */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
/* pre-r600 did not always have the hpd pins mapped accurately to connectors */
if (rdev->family >= CHIP_R600) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
} else if (radeon_dp_needs_link_train(radeon_connector)) {
/* set it to OFF so that drm_helper_connector_dpms()
* won't return immediately since the current state
* is ON at this point.
*/
connector->dpms = DRM_MODE_DPMS_OFF;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
connector->dpms = saved_dpms;
}
}
}
 
static void radeon_property_change_mode(struct drm_encoder *encoder)
{
99,62 → 84,6
crtc->x, crtc->y, crtc->fb);
}
}
 
int radeon_get_monitor_bpc(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int bpc = 8;
 
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
}
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
break;
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
break;
case DRM_MODE_CONNECTOR_eDP:
case DRM_MODE_CONNECTOR_LVDS:
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
bpc = 6;
else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
bpc = 8;
}
break;
}
return bpc;
}
 
static void
radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
{
194,7 → 123,7
}
}
 
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
215,7 → 144,7
return NULL;
}
 
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct drm_mode_object *obj;
366,7 → 295,7
}
}
 
static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
501,24 → 430,55
return 0;
}
 
/*
 * Some integrated ATI Radeon chipset implementations (e.g.
 * Asus M2A-VM HDMI) may indicate the availability of DDC even
 * when there's no monitor connected. For these connectors the
 * following DDC probe extension is applied: additionally check
 * for the availability of an EDID with at least a valid EDID
 * header; only then is DDC assumed to be available. This prevents
 * drm_get_edid() and drm_edid_block_valid() from periodically
 * dumping data and kernel errors into the logs and onto the
 * terminal. (A sketch of such a header check follows the function
 * below.)
 */
static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
uint32_t supported_device,
int connector_type)
{
/* The Asus M2A-VM HDMI board sends data to the i2c bus even if the
 * HDMI add-on card is not plugged in or HDMI is disabled in the
 * BIOS. Valid DDC can only be assumed if a valid EDID header can
 * also be retrieved via the i2c bus during the DDC probe */
if ((dev->pdev->device == 0x791e) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
(dev->pdev->subsystem_device == 0x826d)) {
if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return true;
}
/* The ECS A740GM-M with ATI RADEON 2100 sends data to the i2c bus
 * for a DVI connector that is not implemented */
if ((dev->pdev->device == 0x796e) &&
(dev->pdev->subsystem_vendor == 0x1019) &&
(dev->pdev->subsystem_device == 0x2615)) {
if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return true;
}
 
/* Default: no EDID header probe required for DDC probing */
return false;
}
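
/*
 * Illustrative sketch, not part of the driver: the "valid EDID header"
 * the extended probe insists on is the fixed 8-byte magic that starts
 * every base EDID block. Assuming a raw buffer holding at least the
 * first 8 bytes of the block (the helper name below is hypothetical),
 * a minimal check could look like this:
 */
static inline bool radeon_edid_header_looks_valid(const u8 *raw_edid)
{
	/* every base EDID block begins with 00 ff ff ff ff ff ff 00 */
	static const u8 edid_magic[8] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
	};

	return memcmp(raw_edid, edid_magic, sizeof(edid_magic)) == 0;
}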
 
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
struct drm_display_mode *t, *mode;
 
/* If the EDID preferred mode doesn't match the native mode, use it */
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
if (mode->hdisplay != native_mode->hdisplay ||
mode->vdisplay != native_mode->vdisplay)
memcpy(native_mode, mode, sizeof(*mode));
}
}
 
/* Try to get native mode details from EDID if necessary */
if (!native_mode->clock) {
struct drm_display_mode *t, *mode;
 
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay) {
529,7 → 489,6
}
}
}
 
if (!native_mode->clock) {
DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
646,7 → 605,7
if (radeon_connector->edid)
kfree(radeon_connector->edid);
kfree(radeon_connector->con_priv);
// drm_sysfs_connector_remove(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
687,13 → 646,13
}
 
 
static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
.get_modes = radeon_lvds_get_modes,
.mode_valid = radeon_lvds_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
 
static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
struct drm_connector_funcs radeon_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
741,9 → 700,9
ret = connector_status_disconnected;
 
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
dret = radeon_ddc_probe(radeon_connector,
radeon_connector->requires_extended_probe);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
770,21 → 729,12
} else {
 
/* if we aren't forcing don't do destructive polling */
if (!force) {
/* only return the previous status if we last
* detected a monitor via load.
*/
if (radeon_connector->detected_by_load)
if (!force)
return connector->status;
else
return ret;
}
 
if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
if (ret != connector_status_disconnected)
radeon_connector->detected_by_load = true;
}
}
 
805,13 → 755,13
return ret;
}
 
static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
.get_modes = radeon_vga_get_modes,
.mode_valid = radeon_vga_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
 
static const struct drm_connector_funcs radeon_vga_connector_funcs = {
struct drm_connector_funcs radeon_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
875,13 → 825,13
return ret;
}
 
static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
.get_modes = radeon_tv_get_modes,
.mode_valid = radeon_tv_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
 
static const struct drm_connector_funcs radeon_tv_connector_funcs = {
struct drm_connector_funcs radeon_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_tv_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
898,27 → 848,6
return ret;
}
 
static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status status;
 
/* We only trust HPD on R600 and newer ASICS. */
if (rdev->family >= CHIP_R600
&& radeon_connector->hpd.hpd != RADEON_HPD_NONE) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
status = connector_status_connected;
else
status = connector_status_disconnected;
if (connector->status == status)
return true;
}
 
return false;
}
 
/*
* DVI is complicated
* Do a DDC probe, if DDC probe passes, get the full EDID so
943,13 → 872,10
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false;
 
if (!force && radeon_check_hpd_status_unchanged(connector))
return connector->status;
 
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
dret = radeon_ddc_probe(radeon_connector,
radeon_connector->requires_extended_probe);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
1012,17 → 938,7
if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
goto out;
 
/* DVI-D and HDMI-A are digital only */
if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
(connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
goto out;
 
/* if we aren't forcing don't do destructive polling */
if (!force) {
/* only return the previous status if we last
* detected a monitor via load.
*/
if (radeon_connector->detected_by_load)
ret = connector->status;
goto out;
}
1041,10 → 957,6
 
encoder = obj_to_encoder(obj);
 
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
 
encoder_funcs = encoder->helper_private;
if (encoder_funcs->detect) {
if (ret != connector_status_connected) {
1052,8 → 964,6
if (ret == connector_status_connected) {
radeon_connector->use_digital = false;
}
if (ret != connector_status_disconnected)
radeon_connector->detected_by_load = true;
}
break;
}
1071,7 → 981,6
* cases the DVI port is actually a virtual KVM port connected to the service
* processor.
*/
out:
if ((!rdev->is_atom_bios) &&
(ret == connector_status_disconnected) &&
rdev->mode_info.bios_hardcoded_edid_size) {
1079,6 → 988,7
ret = connector_status_connected;
}
 
out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
return ret;
1085,7 → 995,7
}
 
/* okay need to be smart in here about which encoder to pick */
static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1156,7 → 1066,7
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
if (ASIC_IS_DCE6(rdev)) {
if (ASIC_IS_DCE3(rdev)) {
/* HDMI 1.3+ supports a max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
1175,13 → 1085,13
return MODE_OK;
}
 
static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
.get_modes = radeon_dvi_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
 
static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
struct drm_connector_funcs radeon_dvi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
1200,7 → 1110,7
if (radeon_dig_connector->dp_i2c_bus)
radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
// drm_sysfs_connector_remove(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
1216,7 → 1126,6
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_display_mode *mode;
 
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
1224,15 → 1133,6
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
/* need to setup ddc on the bridge */
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
ret = radeon_ddc_get_modes(radeon_connector);
}
 
if (ret > 0) {
if (encoder) {
1243,6 → 1143,7
return ret;
}
 
encoder = radeon_best_single_encoder(connector);
if (!encoder)
return 0;
 
1259,8 → 1160,7
}
} else {
/* need to setup ddc on the bridge */
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) {
if (radeon_connector_encoder_is_dp_bridge(connector)) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
1270,12 → 1170,13
return ret;
}
 
u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
bool found = false;
 
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
1291,13 → 1192,14
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
return radeon_encoder->encoder_id;
found = true;
break;
default:
break;
}
}
 
return ENCODER_OBJECT_ID_NONE;
return found;
}
 
bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
1349,9 → 1251,6
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
 
if (!force && radeon_check_hpd_status_unchanged(connector))
return connector->status;
 
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
1377,24 → 1276,12
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) {
/* DP bridges are always DP */
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
/* get the DPCD from the bridge */
radeon_dp_getdpcd(radeon_connector);
 
if (encoder) {
/* setup ddc on the bridge */
} else {
/* need to setup ddc on the bridge */
if (radeon_connector_encoder_is_dp_bridge(connector)) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
if (radeon_ddc_probe(radeon_connector)) /* try DDC */
ret = connector_status_connected;
else if (radeon_connector->dac_load_detect) { /* try load detection */
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
} else {
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
1405,11 → 1292,22
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
if (radeon_ddc_probe(radeon_connector))
if (radeon_ddc_probe(radeon_connector,
radeon_connector->requires_extended_probe))
ret = connector_status_connected;
}
}
 
if ((ret == connector_status_disconnected) &&
radeon_connector->dac_load_detect) {
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
struct drm_encoder_helper_funcs *encoder_funcs;
if (encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
}
 
radeon_connector_update_scratch_regs(connector, ret);
return ret;
1458,13 → 1356,13
}
}
 
static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
.get_modes = radeon_dp_get_modes,
.mode_valid = radeon_dp_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
 
static const struct drm_connector_funcs radeon_dp_connector_funcs = {
struct drm_connector_funcs radeon_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
1550,7 → 1448,9
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
 
radeon_connector->requires_extended_probe =
radeon_connector_needs_extended_probe(rdev, supported_device,
connector_type);
radeon_connector->router = *router;
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
1846,7 → 1746,7
connector->polled = DRM_CONNECTOR_POLL_HPD;
 
connector->display_info.subpixel_order = subpixel_order;
// drm_sysfs_connector_add(connector);
drm_sysfs_connector_add(connector);
return;
 
failed:
1897,7 → 1797,9
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
 
radeon_connector->requires_extended_probe =
radeon_connector_needs_extended_probe(rdev, supported_device,
connector_type);
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
2003,5 → 1905,16
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
// drm_sysfs_connector_add(connector);
drm_sysfs_connector_add(connector);
if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
struct drm_encoder *drm_encoder;
 
list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_encoder;
 
radeon_encoder = to_radeon_encoder(drm_encoder);
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
radeon_legacy_backlight_init(radeon_encoder, connector);
}
}
}
/drivers/video/drm/radeon/radeon_display.c
23,16 → 23,18
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
 
#include "atom.h"
#include <asm/div64.h>
 
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "drm_crtc_helper.h"
#include "drm_edid.h"
 
static int radeon_ddc_dump(struct drm_connector *connector);
 
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
286,7 → 288,7
radeon_legacy_init_crtc(dev, radeon_crtc);
}
 
static const char *encoder_names[37] = {
static const char *encoder_names[36] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
323,9 → 325,26
"INTERNAL_UNIPHY2",
"NUTMEG",
"TRAVIS",
"INTERNAL_VCE"
};
 
static const char *connector_names[15] = {
"Unknown",
"VGA",
"DVI-I",
"DVI-D",
"DVI-A",
"Composite",
"S-video",
"LVDS",
"Component",
"DIN",
"DisplayPort",
"HDMI-A",
"HDMI-B",
"TV",
"eDP",
};
 
static const char *hpd_names[6] = {
"HPD1",
"HPD2",
348,7 → 367,7
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", drm_get_connector_name(connector));
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
414,6 → 433,7
static bool radeon_setup_enc_conn(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *drm_connector;
bool ret = false;
 
if (rdev->bios) {
433,6 → 453,8
if (ret) {
radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
radeon_ddc_dump(drm_connector);
}
 
return ret;
449,23 → 471,17
radeon_router_select_ddc_port(radeon_connector);
 
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
(radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
ENCODER_OBJECT_ID_NONE)) {
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&dig->dp_i2c_bus->adapter);
else if (radeon_connector->ddc_bus && !radeon_connector->edid)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
} else {
if (radeon_connector->ddc_bus && !radeon_connector->edid)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
}
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
}
 
if (!radeon_connector->edid) {
if (rdev->is_atom_bios) {
486,6 → 502,34
return 0;
}
 
static int radeon_ddc_dump(struct drm_connector *connector)
{
struct edid *edid;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret = 0;
 
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
 
if (!radeon_connector->ddc_bus)
return -1;
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
/* Log the EDID retrieval status here. This matters in particular for
 * connectors with the requires_extended_probe flag set, since that
 * flag prevents radeon_dvi_detect() from fetching an EDID on the
 * connector as long as no valid EDID header is found */
if (edid) {
DRM_INFO("Radeon display connector %s: Found valid EDID",
drm_get_connector_name(connector));
kfree(edid);
} else {
DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
drm_get_connector_name(connector));
}
return ret;
}
 
/* avivo */
static void avivo_get_fb_div(struct radeon_pll *pll,
u32 target_clock,
823,25 → 867,15
.create_handle = radeon_user_framebuffer_create_handle,
};
 
int
void
radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
 
ENTER();
 
rfb->obj = obj;
ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
if (ret) {
rfb->obj = NULL;
return ret;
}
drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
LEAVE();
return 0;
}
 
 
851,6 → 885,11
// .output_poll_changed = radeon_output_poll_changed
};
 
struct drm_prop_enum_list {
int type;
char *name;
};
 
static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{ { 0, "driver" },
{ 1, "bios" },
875,53 → 914,86
 
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
int i, sz;
 
if (rdev->is_atom_bios) {
rdev->mode_info.coherent_mode_property =
drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1);
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"coherent", 2);
if (!rdev->mode_info.coherent_mode_property)
return -ENOMEM;
 
rdev->mode_info.coherent_mode_property->values[0] = 0;
rdev->mode_info.coherent_mode_property->values[1] = 1;
}
 
if (!ASIC_IS_AVIVO(rdev)) {
sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
rdev->mode_info.tmds_pll_property =
drm_property_create_enum(rdev->ddev, 0,
"tmds_pll",
radeon_tmds_pll_enum_list, sz);
drm_property_create(rdev->ddev,
DRM_MODE_PROP_ENUM,
"tmds_pll", sz);
for (i = 0; i < sz; i++) {
drm_property_add_enum(rdev->mode_info.tmds_pll_property,
i,
radeon_tmds_pll_enum_list[i].type,
radeon_tmds_pll_enum_list[i].name);
}
}
 
rdev->mode_info.load_detect_property =
drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"load detection", 2);
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
rdev->mode_info.load_detect_property->values[0] = 0;
rdev->mode_info.load_detect_property->values[1] = 1;
 
drm_mode_create_scaling_mode_property(rdev->ddev);
 
sz = ARRAY_SIZE(radeon_tv_std_enum_list);
rdev->mode_info.tv_std_property =
drm_property_create_enum(rdev->ddev, 0,
"tv standard",
radeon_tv_std_enum_list, sz);
drm_property_create(rdev->ddev,
DRM_MODE_PROP_ENUM,
"tv standard", sz);
for (i = 0; i < sz; i++) {
drm_property_add_enum(rdev->mode_info.tv_std_property,
i,
radeon_tv_std_enum_list[i].type,
radeon_tv_std_enum_list[i].name);
}
 
sz = ARRAY_SIZE(radeon_underscan_enum_list);
rdev->mode_info.underscan_property =
drm_property_create_enum(rdev->ddev, 0,
"underscan",
radeon_underscan_enum_list, sz);
drm_property_create(rdev->ddev,
DRM_MODE_PROP_ENUM,
"underscan", sz);
for (i = 0; i < sz; i++) {
drm_property_add_enum(rdev->mode_info.underscan_property,
i,
radeon_underscan_enum_list[i].type,
radeon_underscan_enum_list[i].name);
}
 
rdev->mode_info.underscan_hborder_property =
drm_property_create_range(rdev->ddev, 0,
"underscan hborder", 0, 128);
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"underscan hborder", 2);
if (!rdev->mode_info.underscan_hborder_property)
return -ENOMEM;
rdev->mode_info.underscan_hborder_property->values[0] = 0;
rdev->mode_info.underscan_hborder_property->values[1] = 128;
 
rdev->mode_info.underscan_vborder_property =
drm_property_create_range(rdev->ddev, 0,
"underscan vborder", 0, 128);
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"underscan vborder", 2);
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
rdev->mode_info.underscan_vborder_property->values[0] = 0;
rdev->mode_info.underscan_vborder_property->values[1] = 128;
 
return 0;
}
946,93 → 1018,6
 
}
 
/*
 * Allocate HDMI structs and determine register offsets
*/
static void radeon_afmt_init(struct radeon_device *rdev)
{
int i;
 
for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
rdev->mode_info.afmt[i] = NULL;
 
if (ASIC_IS_DCE6(rdev)) {
/* todo */
} else if (ASIC_IS_DCE4(rdev)) {
/* DCE4/5 has 6 audio blocks tied to DIG encoders */
/* DCE4.1 has 2 audio blocks tied to DIG encoders */
rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[0]) {
rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
rdev->mode_info.afmt[0]->id = 0;
}
rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[1]) {
rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
rdev->mode_info.afmt[1]->id = 1;
}
if (!ASIC_IS_DCE41(rdev)) {
rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[2]) {
rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
rdev->mode_info.afmt[2]->id = 2;
}
rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[3]) {
rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
rdev->mode_info.afmt[3]->id = 3;
}
rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[4]) {
rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
rdev->mode_info.afmt[4]->id = 4;
}
rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[5]) {
rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
rdev->mode_info.afmt[5]->id = 5;
}
}
} else if (ASIC_IS_DCE3(rdev)) {
/* DCE3.x has 2 audio blocks tied to DIG encoders */
rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[0]) {
rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
rdev->mode_info.afmt[0]->id = 0;
}
rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[1]) {
rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
rdev->mode_info.afmt[1]->id = 1;
}
} else if (ASIC_IS_DCE2(rdev)) {
/* DCE2 has at least 1 routable audio block */
rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[0]) {
rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
rdev->mode_info.afmt[0]->id = 0;
}
/* r6xx has 2 routable audio blocks */
if (rdev->family >= CHIP_R600) {
rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[1]) {
rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
rdev->mode_info.afmt[1]->id = 1;
}
}
}
}
 
static void radeon_afmt_fini(struct radeon_device *rdev)
{
int i;
 
for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
kfree(rdev->mode_info.afmt[i]);
rdev->mode_info.afmt[i] = NULL;
}
}
 
int radeon_modeset_init(struct radeon_device *rdev)
{
int i;
1041,7 → 1026,7
drm_mode_config_init(rdev->ddev);
rdev->mode_info.mode_config_initialized = true;
 
rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
 
if (ASIC_IS_DCE5(rdev)) {
rdev->ddev->mode_config.max_width = 16384;
1054,9 → 1039,6
rdev->ddev->mode_config.max_height = 4096;
}
 
rdev->ddev->mode_config.preferred_depth = 24;
rdev->ddev->mode_config.prefer_shadow = 1;
 
rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
 
ret = radeon_modeset_create_props(rdev);
1084,18 → 1066,13
return ret;
}
 
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
/* init dig PHYs */
if (rdev->is_atom_bios)
radeon_atom_encoder_init(rdev);
radeon_atom_disp_eng_pll_init(rdev);
}
 
/* initialize hpd */
// radeon_hpd_init(rdev);
 
/* setup afmt */
// radeon_afmt_init(rdev);
 
/* Initialize power management */
// radeon_pm_init(rdev);
 
1110,7 → 1087,6
kfree(rdev->mode_info.bios_hardcoded_edid);
 
if (rdev->mode_info.mode_config_initialized) {
// radeon_afmt_fini(rdev);
// drm_kms_helper_poll_fini(rdev->ddev);
// radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
1120,7 → 1096,7
radeon_i2c_fini(rdev);
}
 
static bool is_hdtv_mode(const struct drm_display_mode *mode)
static bool is_hdtv_mode(struct drm_display_mode *mode)
{
/* try and guess if this is a tv or a monitor */
if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
1133,7 → 1109,7
}
 
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
1150,8 → 1126,6
radeon_crtc->h_border = 0;
radeon_crtc->v_border = 0;
 
ENTER();
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
1159,10 → 1133,6
connector = radeon_get_connector_for_encoder(encoder);
radeon_connector = to_radeon_connector(connector);
 
dbgprintf("native_hdisplay %d vdisplay %d\n",
radeon_encoder->native_mode.hdisplay,
radeon_encoder->native_mode.vdisplay);
 
if (first) {
/* set scaling */
if (radeon_encoder->rmx_type == RMX_OFF)
1228,9 → 1198,6
radeon_crtc->vsc.full = dfixed_const(1);
radeon_crtc->hsc.full = dfixed_const(1);
}
 
LEAVE();
 
return true;
}
 
/drivers/video/drm/radeon/radeon_encoders.c
23,19 → 23,17
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
 
extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
extern void
radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
extern int atom_debug;
 
/* evil but including atombios.h is much worse */
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
 
static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
{
158,10 → 156,30
return ret;
}
 
static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
return true;
default:
return false;
}
}
 
void
radeon_link_encoder_connector(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct drm_encoder *encoder;
172,19 → 190,11
radeon_connector = to_radeon_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & radeon_connector->devices) {
if (radeon_encoder->devices & radeon_connector->devices)
drm_mode_connector_attach_encoder(connector, encoder);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (rdev->is_atom_bios)
radeon_atom_backlight_init(radeon_encoder, connector);
else
radeon_legacy_backlight_init(radeon_encoder, connector);
rdev->mode_info.bl_encoder = radeon_encoder;
}
}
}
}
}
 
void radeon_encoder_set_active_device(struct drm_encoder *encoder)
{
219,7 → 229,7
return NULL;
}
 
struct drm_connector *
static struct drm_connector *
radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
235,7 → 245,7
return NULL;
}
 
struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
256,9 → 266,9
return NULL;
}
 
u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder)
{
struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder);
 
if (other_encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
266,12 → 276,13
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
return radeon_encoder->encoder_id;
return true;
default:
return ENCODER_OBJECT_ID_NONE;
return false;
}
}
return ENCODER_OBJECT_ID_NONE;
 
return false;
}
 
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
321,15 → 332,339
 
}
 
bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
u32 pixel_clock)
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
 
/* hw bug: for interlaced modes, keep the vsync start at least
 * two lines past the end of the active display area
 */
if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
/* get the native mode for LVDS */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
radeon_panel_mode_fixup(encoder, adjusted_mode);
 
/* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
if (tv_dac->tv_std == TV_STD_NTSC ||
tv_dac->tv_std == TV_STD_NTSC_J ||
tv_dac->tv_std == TV_STD_PAL_M)
radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
else
radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
}
}
 
if (ASIC_IS_DCE3(rdev) &&
((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
radeon_encoder_is_dp_bridge(encoder))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
 
return true;
}
 
static void
atombios_dac_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
DAC_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
break;
}
 
args.ucAction = action;
 
if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
args.ucDacStandard = ATOM_DAC1_PS2;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.ucDacStandard = ATOM_DAC1_CV;
else {
switch (dac_info->tv_std) {
case TV_STD_PAL:
case TV_STD_PAL_M:
case TV_STD_SCART_PAL:
case TV_STD_SECAM:
case TV_STD_PAL_CN:
args.ucDacStandard = ATOM_DAC1_PAL;
break;
case TV_STD_NTSC:
case TV_STD_NTSC_J:
case TV_STD_PAL_60:
default:
args.ucDacStandard = ATOM_DAC1_NTSC;
break;
}
}
args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
 
static void
atombios_tv_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
TV_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
 
memset(&args, 0, sizeof(args));
 
index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
 
args.sTVEncoder.ucAction = action;
 
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
else {
switch (dac_info->tv_std) {
case TV_STD_NTSC:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
break;
case TV_STD_PAL:
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
break;
case TV_STD_PAL_M:
args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
break;
case TV_STD_PAL_60:
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
break;
case TV_STD_NTSC_J:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
break;
case TV_STD_SCART_PAL:
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
break;
case TV_STD_SECAM:
args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
break;
case TV_STD_PAL_CN:
args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
break;
default:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
break;
}
}
 
args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
 
union dvo_encoder_control {
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
};
 
void
atombios_dvo_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
union dvo_encoder_control args;
int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
 
memset(&args, 0, sizeof(args));
 
if (ASIC_IS_DCE3(rdev)) {
/* DCE3+ */
args.dvo_v3.ucAction = action;
args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.dvo_v3.ucDVOConfig = 0; /* XXX */
} else if (ASIC_IS_DCE2(rdev)) {
/* DCE2 (pre-DCE3 R6xx, RS600/690/740) */
args.dvo.sDVOEncoder.ucAction = action;
args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
/* DFP1, CRT1, TV1 depending on the type of port */
args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
 
if (radeon_encoder->pixel_clock > 165000)
args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
} else {
/* R4xx, R5xx */
args.ext_tmds.sXTmdsEncoder.ucEnable = action;
 
if (radeon_encoder->pixel_clock > 165000)
args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
 
/*if (pScrn->rgbBits == 8)*/
args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
union lvds_encoder_control {
LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};
 
void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
 
if (!dig)
return;
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
hdmi_detected = 1;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
else
index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
break;
}
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
case 2:
switch (crev) {
case 1:
args.v1.ucMisc = 0;
args.v1.ucAction = action;
if (hdmi_detected)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
} else {
if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
/*if (pScrn->rgbBits == 8) */
args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
}
break;
case 2:
case 3:
args.v2.ucMisc = 0;
args.v2.ucAction = action;
if (crev == 3) {
if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
}
if (hdmi_detected)
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0;
args.v2.ucSpatial = 0;
args.v2.ucTemporal = 0;
args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
}
if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
if (dig->linkb)
args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
 
/* dp bridges are always DP */
if (radeon_encoder_is_dp_bridge(encoder))
return ATOM_ENCODER_MODE_DP;
 
/* DVO is always DVO */
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
	return ATOM_ENCODER_MODE_DVO;
 
connector = radeon_get_connector_for_encoder(encoder);
/* if we don't have an active device yet, just use one of
* the connectors tied to the encoder.
340,45 → 675,1737
 
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 MHz over a single link */
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
else
return false;
} else {
if (pixel_clock > 165000)
return true;
return ATOM_ENCODER_MODE_HDMI;
} else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return false;
}
} else
return false;
return ATOM_ENCODER_MODE_CRT;
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_HDMI;
} else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return false;
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_HDMI;
} else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_eDP:
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT;
break;
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_9PinDIN:
/* fix me */
return ATOM_ENCODER_MODE_TV;
/*return ATOM_ENCODER_MODE_CV;*/
break;
}
}
 
/*
* DIG Encoder/Transmitter Setup
*
* DCE 3.0/3.1
* - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
* Supports up to 3 digital outputs
* - 2 DIG encoder blocks.
* DIG1 can drive UNIPHY link A or link B
* DIG2 can drive UNIPHY link B or LVTMA
*
* DCE 3.2
* - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
* Supports up to 5 digital outputs
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
* DCE 4.0/5.0
* - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 6 DIG encoder blocks.
* - DIG to PHY mapping is hardcoded
* DIG1 drives UNIPHY0 link A, A+B
* DIG2 drives UNIPHY0 link B
* DIG3 drives UNIPHY1 link A, A+B
* DIG4 drives UNIPHY1 link B
* DIG5 drives UNIPHY2 link A, A+B
* DIG6 drives UNIPHY2 link B
*
* DCE 4.1
* - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
* crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
* crtc1 -> dig1 -> UNIPHY0 link B -> DP
* crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
* crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
*/
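
/*
 * Illustrative sketch, not part of the driver: the hardcoded DCE 4.0/5.0
 * DIG-to-PHY routing described above, written out as a lookup table. The
 * struct and table names are hypothetical; the index is the 0-based DIG
 * block (DIG1 == 0), matching dig->dig_encoder as used in this file.
 */
struct dce4_dig_route {
	int uniphy;	/* UNIPHY block: 0, 1 or 2 */
	bool link_b;	/* false: link A (or A+B), true: link B */
};

static const struct dce4_dig_route dce4_dig_routes[6] = {
	{ 0, false },	/* DIG1 -> UNIPHY0 link A, A+B */
	{ 0, true },	/* DIG2 -> UNIPHY0 link B */
	{ 1, false },	/* DIG3 -> UNIPHY1 link A, A+B */
	{ 1, true },	/* DIG4 -> UNIPHY1 link B */
	{ 2, false },	/* DIG5 -> UNIPHY2 link A, A+B */
	{ 2, true },	/* DIG6 -> UNIPHY2 link B */
};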
 
union dig_encoder_control {
DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
};
 
void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_encoder_control args;
int index = 0;
uint8_t frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int hpd_id = RADEON_HPD_NONE;
int bpc = 8;
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
bpc = connector->display_info.bpc;
}
 
/* no dig encoder assigned */
if (dig->dig_encoder == -1)
return;
 
memset(&args, 0, sizeof(args));
 
if (ASIC_IS_DCE4(rdev))
index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
else {
/* HDMI 1.3 supports up to 340 MHz over a single link */
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
if (dig->dig_encoder)
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
else
return false;
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
}
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
args.v1.ucAction = action;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
args.v3.ucPanelMode = panel_mode;
else
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
(args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
args.v1.ucLaneNum = dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
args.v1.ucLaneNum = 4;
 
if (ASIC_IS_DCE5(rdev)) {
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
(args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
}
args.v4.acConfig.ucDigSel = dig->dig_encoder;
switch (bpc) {
case 0:
args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
break;
case 6:
args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
break;
case 8:
default:
args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case 10:
args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case 12:
args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case 16:
args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
}
if (hpd_id == RADEON_HPD_NONE)
args.v4.ucHPD_ID = 0;
else
args.v4.ucHPD_ID = hpd_id + 1;
} else if (ASIC_IS_DCE4(rdev)) {
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
args.v3.acConfig.ucDigSel = dig->dig_encoder;
switch (bpc) {
case 0:
args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
break;
case 6:
args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
break;
case 8:
default:
args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case 10:
args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case 12:
args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case 16:
args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
}
} else {
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
if (dig->linkb)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
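/* The bpc -> ucBitPerColor switch above is repeated verbatim for the
 * v3 and v4 encoder args, and again for the external-encoder path
 * further below. A minimal sketch of how it could be factored out;
 * this helper is hypothetical and is not defined in this driver:
 */
static u8 radeon_atom_bpc_to_enum(int bpc)
{
	switch (bpc) {
	case 0:  return PANEL_BPC_UNDEFINE;
	case 6:  return PANEL_6BIT_PER_COLOR;
	case 10: return PANEL_10BIT_PER_COLOR;
	case 12: return PANEL_12BIT_PER_COLOR;
	case 16: return PANEL_16BIT_PER_COLOR;
	case 8:
	default: return PANEL_8BIT_PER_COLOR;
	}
}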
 
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};
 
void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector;
union dig_transmitter_control args;
int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
int igp_lane_info = 0;
int dig_encoder = dig->dig_encoder;
 
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
connector = radeon_get_connector_for_encoder_init(encoder);
/* just needed to avoid bailing in the encoder check. the encoder
* isn't used for init
*/
dig_encoder = 0;
} else
connector = radeon_get_connector_for_encoder(encoder);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
igp_lane_info = dig_connector->igp_lane_info;
}
 
/* no dig encoder assigned */
if (dig_encoder == -1)
return;
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
break;
}
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v1.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v1.usPixelClock =
cpu_to_le16(dp_clock / 10);
else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
if (ASIC_IS_DCE4(rdev)) {
if (is_dp)
args.v3.ucLaneNum = dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
 
if (dig->linkb)
args.v3.acConfig.ucLinkSel = 1;
if (dig_encoder & 1)
args.v3.acConfig.ucEncoderSel = 1;
 
/* Select the PLL for the PHY
* DP PHY should be clocked from external src if there is
* one.
*/
if (encoder->crtc) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
pll_id = radeon_crtc->pll_id;
}
 
if (ASIC_IS_DCE5(rdev)) {
/* On DCE5 DCPLL usually generates the DP ref clock */
if (is_dp) {
if (rdev->clock.dp_extclk)
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
else
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
} else
args.v4.acConfig.ucRefClkSource = pll_id;
} else {
/* On DCE4, if there is an external clock, it generates the DP ref clock */
if (is_dp && rdev->clock.dp_extclk)
args.v3.acConfig.ucRefClkSource = 2; /* external src */
else
args.v3.acConfig.ucRefClkSource = pll_id;
}
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v3.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v3.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v3.acConfig.ucTransmitterSel = 2;
break;
}
 
if (is_dp)
args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v3.acConfig.fCoherentMode = 1;
if (radeon_encoder->pixel_clock > 165000)
args.v3.acConfig.fDualLinkConnector = 1;
}
} else if (ASIC_IS_DCE32(rdev)) {
args.v2.acConfig.ucEncoderSel = dig_encoder;
if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v2.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v2.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v2.acConfig.ucTransmitterSel = 2;
break;
}
 
if (is_dp) {
args.v2.acConfig.fCoherentMode = 1;
args.v2.acConfig.fDPConnector = 1;
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
if (radeon_encoder->pixel_clock > 165000)
args.v2.acConfig.fDualLinkConnector = 1;
}
} else {
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
 
if (dig_encoder)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 
if ((rdev->flags & RADEON_IS_IGP) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
if (igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
else if (igp_lane_info & 0x2)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
else if (igp_lane_info & 0x4)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
else if (igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
} else {
if (igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
else if (igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
}
 
if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
 
if (is_dp)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
}
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
bool
atombios_set_edp_panel_power(struct drm_connector *connector, int action)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
union dig_transmitter_control args;
int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
uint8_t frev, crev;
 
if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
goto done;
 
if (!ASIC_IS_DCE4(rdev))
goto done;
 
if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
(action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
goto done;
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
goto done;
 
memset(&args, 0, sizeof(args));
 
args.v1.ucAction = action;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
int i;
 
for (i = 0; i < 300; i++) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
return true;
else
mdelay(1);
}
return false;
}
done:
return true;
}
 
union external_encoder_control {
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
 
static void
atombios_external_encoder_setup(struct drm_encoder *encoder,
struct drm_encoder *ext_encoder,
int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
union external_encoder_control args;
struct drm_connector *connector;
int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
u8 frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
int bpc = 8;
 
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
connector = radeon_get_connector_for_encoder_init(encoder);
else
connector = radeon_get_connector_for_encoder(encoder);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
bpc = connector->display_info.bpc;
}
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
/* no params on frev 1 */
break;
case 2:
switch (crev) {
case 1:
case 2:
args.v1.sDigEncoder.ucAction = action;
args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
if (dp_clock == 270000)
args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v1.sDigEncoder.ucLaneNum = 8;
else
args.v1.sDigEncoder.ucLaneNum = 4;
break;
case 3:
args.v3.sExtEncoder.ucAction = action;
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
else
args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
if (dp_clock == 270000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v3.sExtEncoder.ucLaneNum = 8;
else
args.v3.sExtEncoder.ucLaneNum = 4;
switch (ext_enum) {
case GRAPH_OBJECT_ENUM_ID1:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
break;
case GRAPH_OBJECT_ENUM_ID2:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
break;
case GRAPH_OBJECT_ENUM_ID3:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
break;
}
switch (bpc) {
case 0:
args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
break;
case 6:
args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
break;
case 8:
default:
args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case 10:
args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case 12:
args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case 16:
args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void
atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
ENABLE_YUV_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
uint32_t temp, reg;
 
memset(&args, 0, sizeof(args));
 
if (rdev->family >= CHIP_R600)
reg = R600_BIOS_3_SCRATCH;
else
reg = RADEON_BIOS_3_SCRATCH;
 
/* XXX: fix up scratch reg handling */
temp = RREG32(reg);
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
WREG32(reg, (ATOM_S3_TV1_ACTIVE |
(radeon_crtc->crtc_id << 18)));
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
else
WREG32(reg, 0);
 
if (enable)
args.ucEnable = ATOM_ENABLE;
args.ucCRTC = radeon_crtc->crtc_id;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
WREG32(reg, temp);
}
 
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
bool is_dce5_dac = false;
bool is_dce5_dvo = false;
 
memset(&args, 0, sizeof(args));
 
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
is_dig = true;
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
if (ASIC_IS_DCE5(rdev))
is_dce5_dvo = true;
else if (ASIC_IS_DCE3(rdev))
is_dig = true;
else
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (ASIC_IS_DCE5(rdev))
is_dce5_dac = true;
else {
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
break;
}
 
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
/* some early dce3.2 boards have a bug in their transmitter control table */
if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
else
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if (connector &&
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector =
radeon_connector->con_priv;
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
radeon_dig_connector->edp_on = true;
}
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
radeon_dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
if (connector &&
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector =
radeon_connector->con_priv;
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
radeon_dig_connector->edp_on = false;
}
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
} else if (is_dce5_dac) {
switch (mode) {
case DRM_MODE_DPMS_ON:
atombios_dac_setup(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dac_setup(encoder, ATOM_DISABLE);
break;
}
} else if (is_dce5_dvo) {
switch (mode) {
case DRM_MODE_DPMS_ON:
atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
}
} else {
switch (mode) {
case DRM_MODE_DPMS_ON:
args.ucAction = ATOM_ENABLE;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLON;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
args.ucAction = ATOM_DISABLE;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLOFF;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
}
}
 
if (ext_encoder) {
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
if (ASIC_IS_DCE41(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (ASIC_IS_DCE41(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
break;
}
}
 
radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
 
}
 
union crtc_source_param {
SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
 
static void
atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
union crtc_source_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
switch (crev) {
case 1:
default:
if (ASIC_IS_AVIVO(rdev))
args.v1.ucCRTC = radeon_crtc->crtc_id;
else {
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
args.v1.ucCRTC = radeon_crtc->crtc_id;
} else {
args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
}
}
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
break;
}
break;
case 2:
args.v2.ucCRTC = radeon_crtc->crtc_id;
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = radeon_encoder->enc_priv;
switch (dig->dig_encoder) {
case 0:
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
break;
case 1:
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
break;
case 2:
args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
break;
case 3:
args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
break;
case 4:
args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
break;
case 5:
args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
break;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
break;
}
break;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* update scratch regs with new routing */
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
 
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 
/* Funky macbooks */
if ((dev->pdev->device == 0x71C5) &&
(dev->pdev->subsystem_vendor == 0x106b) &&
(dev->pdev->subsystem_device == 0x0080)) {
if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
 
lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
 
WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
}
}
 
/* set scaler clears this on some chips */
if (ASIC_IS_AVIVO(rdev) &&
(!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
if (ASIC_IS_DCE4(rdev)) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
EVERGREEN_INTERLEAVE_EN);
else
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
} else {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
AVIVO_D1MODE_INTERLEAVE_EN);
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
}
}
}
 
static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
 
/* DCE4/5 */
if (ASIC_IS_DCE4(rdev)) {
dig = radeon_encoder->enc_priv;
if (ASIC_IS_DCE41(rdev))
return radeon_crtc->crtc_id;
else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
return 1;
else
return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
return 3;
else
return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
return 5;
else
return 4;
break;
}
}
}
 
/* on DCE32 any encoder can drive any block, so just use the crtc id */
if (ASIC_IS_DCE32(rdev)) {
return radeon_crtc->crtc_id;
}
 
/* on DCE3 - LVTMA can only be driven by DIGB */
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_test_encoder;
 
if (encoder == test_encoder)
continue;
 
if (!radeon_encoder_is_digital(test_encoder))
continue;
 
radeon_test_encoder = to_radeon_encoder(test_encoder);
dig = radeon_test_encoder->enc_priv;
 
if (dig->dig_encoder >= 0)
dig_enc_in_use |= (1 << dig->dig_encoder);
}
 
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
if (dig_enc_in_use & 0x2)
DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
return 1;
}
if (!(dig_enc_in_use & 1))
return 0;
return 1;
}
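/* On DCE4 (non-4.1) the switch above reduces to a simple formula:
 * each UNIPHY block owns two DIG encoders, with link A taking the even
 * one and link B the odd one. A hypothetical equivalent:
 *
 *	dig_encoder = 2 * uniphy_index + (dig->linkb ? 1 : 0);
 *
 * where uniphy_index is 0/1/2 for UNIPHY/UNIPHY1/UNIPHY2.
 */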
 
/* This only needs to be called once at startup */
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_encoder *encoder;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
break;
default:
break;
}
 
if (ext_encoder && ASIC_IS_DCE41(rdev))
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
}
}
 
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
 
radeon_encoder->pixel_clock = adjusted_mode->clock;
 
if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
else
atombios_yuv_setup(encoder, false);
}
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
if (ASIC_IS_DCE4(rdev)) {
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
/* setup and enable the encoder */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
 
/* enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
} else {
/* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
 
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_ENABLE);
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
atombios_tv_setup(encoder, ATOM_ENABLE);
else
atombios_tv_setup(encoder, ATOM_DISABLE);
}
break;
}
 
if (ext_encoder) {
if (ASIC_IS_DCE41(rdev))
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
}
 
atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
r600_hdmi_setmode(encoder, adjusted_mode);
}
}
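/* Summary of the DIG enable sequence coded in the switch above (as
 * written, no additional behavior): DCE4+ disables the transmitter,
 * sets up the DIG encoder with ATOM_ENCODER_CMD_SETUP, then enables
 * the transmitter; pre-DCE4 parts instead disable both encoder and
 * transmitter, then enable the encoder and run transmitter SETUP
 * before ENABLE.
 */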
 
static bool
atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
ATOM_DEVICE_CV_SUPPORT |
ATOM_DEVICE_CRT_SUPPORT)) {
DAC_LOAD_DETECTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
uint8_t frev, crev;
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return false;
 
args.sDacload.ucMisc = 0;
 
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
args.sDacload.ucDacType = ATOM_DAC_A;
else
args.sDacload.ucDacType = ATOM_DAC_B;
 
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
if (crev >= 3)
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
if (crev >= 3)
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
return true;
} else
return false;
}
 
static enum drm_connector_status
radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
uint32_t bios_0_scratch;
 
if (!atombios_dac_load_detect(encoder, connector)) {
DRM_DEBUG_KMS("detect returned false \n");
return connector_status_unknown;
}
 
if (rdev->family >= CHIP_R600)
bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
else
bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
 
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT2_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
return connector_status_connected; /* CTV */
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
return connector_status_connected; /* STV */
}
return connector_status_disconnected;
}
 
static enum drm_connector_status
radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
u32 bios_0_scratch;
 
if (!ASIC_IS_DCE4(rdev))
return connector_status_unknown;
 
if (!ext_encoder)
return connector_status_unknown;
 
if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
return connector_status_unknown;
 
/* load detect on the dp bridge */
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
 
bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
 
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT2_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
return connector_status_connected; /* CTV */
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
return connector_status_connected; /* STV */
}
return connector_status_disconnected;
}
 
void
radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
{
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
 
if (ext_encoder)
/* ddc_setup on the dp bridge */
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
 
}
 
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if ((radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
radeon_encoder_is_dp_bridge(encoder)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (dig)
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
}
 
radeon_atom_output_lock(encoder, true);
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
/* select the clock/data port if it uses a router */
if (radeon_connector->router.cd_valid)
radeon_router_select_cd_port(radeon_connector);
 
/* turn eDP panel on for mode set */
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
}
 
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
}
 
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
radeon_atom_output_lock(encoder, false);
}
 
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
 
/* check for pre-DCE3 cards with shared encoders;
* can't really use the links individually, so don't disable
* the encoder if it's in use by another connector
*/
if (!ASIC_IS_DCE3(rdev)) {
struct drm_encoder *other_encoder;
struct radeon_encoder *other_radeon_encoder;
 
list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
other_radeon_encoder = to_radeon_encoder(other_encoder);
if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
drm_helper_encoder_in_use(other_encoder))
goto disable_done;
}
}
 
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
if (ASIC_IS_DCE4(rdev))
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
else {
/* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_DISABLE);
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
atombios_tv_setup(encoder, ATOM_DISABLE);
break;
}
 
disable_done:
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
radeon_encoder->active_device = 0;
}
 
/* these are handled by the primary encoders */
static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
{
 
}
 
static void radeon_atom_ext_commit(struct drm_encoder *encoder)
{
 
}
 
static void
radeon_atom_ext_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
 
}
 
static void radeon_atom_ext_disable(struct drm_encoder *encoder)
{
 
}
 
static void
radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
{
 
}
 
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
 
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
.dpms = radeon_atom_ext_dpms,
.mode_fixup = radeon_atom_ext_mode_fixup,
.prepare = radeon_atom_ext_prepare,
.mode_set = radeon_atom_ext_mode_set,
.commit = radeon_atom_ext_commit,
.disable = radeon_atom_ext_disable,
/* no detect for TMDS/LVDS yet */
};
 
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
.dpms = radeon_atom_encoder_dpms,
.mode_fixup = radeon_atom_mode_fixup,
.prepare = radeon_atom_encoder_prepare,
.mode_set = radeon_atom_encoder_mode_set,
.commit = radeon_atom_encoder_commit,
.disable = radeon_atom_encoder_disable,
.detect = radeon_atom_dig_detect,
};
 
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
.dpms = radeon_atom_encoder_dpms,
.mode_fixup = radeon_atom_mode_fixup,
.prepare = radeon_atom_encoder_prepare,
.mode_set = radeon_atom_encoder_mode_set,
.commit = radeon_atom_encoder_commit,
.detect = radeon_atom_dac_detect,
};
 
void radeon_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
}
 
static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
.destroy = radeon_enc_destroy,
};
 
struct radeon_encoder_atom_dac *
radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
 
if (!dac)
return NULL;
 
dac->tv_std = radeon_atombios_get_tv_info(rdev);
return dac;
}
 
struct radeon_encoder_atom_dig *
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
 
if (!dig)
return NULL;
 
/* coherent mode by default */
dig->coherent_mode = true;
dig->dig_encoder = -1;
 
if (encoder_enum == 2)
dig->linkb = true;
else
dig->linkb = false;
 
return dig;
}
 
void
radeon_add_atom_encoder(struct drm_device *dev,
uint32_t encoder_enum,
uint32_t supported_device,
u16 caps)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
 
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
 
}
 
/* add a new one */
radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
if (!radeon_encoder)
return;
 
encoder = &radeon_encoder->base;
switch (rdev->num_crtc) {
case 1:
encoder->possible_crtcs = 0x1;
break;
case 2:
default:
encoder->possible_crtcs = 0x3;
break;
case 4:
encoder->possible_crtcs = 0xf;
break;
case 6:
encoder->possible_crtcs = 0x3f;
break;
}
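	/* for the crtc counts this driver supports (1/2/4/6), the switch
	 * above is equivalent to a full bitmask of the available crtcs:
	 *
	 *	encoder->possible_crtcs = (1 << rdev->num_crtc) - 1;
	 */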
 
radeon_encoder->enc_priv = NULL;
 
radeon_encoder->encoder_enum = encoder_enum;
radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
radeon_encoder->is_ext_encoder = false;
radeon_encoder->caps = caps;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
case ENCODER_OBJECT_ID_SI170B:
case ENCODER_OBJECT_ID_CH7303:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
case ENCODER_OBJECT_ID_TITFP513:
case ENCODER_OBJECT_ID_VT1623:
case ENCODER_OBJECT_ID_HDMI_SI1930:
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
/* these are handled by the primary encoders */
radeon_encoder->is_ext_encoder = true;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
else
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
break;
}
}
/drivers/video/drm/radeon/radeon_i2c.c
23,23 → 23,16
* Authors: Dave Airlie
* Alex Deucher
*/
#include <linux/export.h>
 
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
 
extern int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num);
extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
 
/**
* radeon_ddc_probe - probe whether a DDC-capable display responds on this connector
*
*/
bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe)
{
u8 out = 0x0;
u8 buf[8];
46,19 → 39,23
int ret;
struct i2c_msg msgs[] = {
{
.addr = DDC_ADDR,
.addr = 0x50,
.flags = 0,
.len = 1,
.buf = &out,
},
{
.addr = DDC_ADDR,
.addr = 0x50,
.flags = I2C_M_RD,
.len = 8,
.len = 1,
.buf = buf,
}
};
 
/* Read 8 bytes from i2c for extended probe of EDID header */
if (requires_extended_probe)
msgs[1].len = 8;
 
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
67,6 → 64,7
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
return false;
if (requires_extended_probe) {
/* Probe also for valid EDID header
* EDID header starts with:
* 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
77,14 → 75,14
* connector */
return false;
}
}
return true;
}
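/* A minimal sketch (an assumption, not code from this file) of the
 * extended-probe comparison described in the comment above: the first
 * 8 bytes read back over DDC must match the fixed EDID header.
 */
static const u8 edid_header[] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
/* in radeon_ddc_probe(): if (memcmp(buf, edid_header, 8) != 0) return false; */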
 
/* bit banging i2c */
 
static int pre_xfer(struct i2c_adapter *i2c_adap)
static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
139,30 → 137,19
WREG32(rec->en_data_reg, temp);
 
/* mask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
temp = RREG32(rec->mask_clk_reg);
if (lock_state)
temp |= rec->mask_clk_mask;
else
temp &= ~rec->mask_clk_mask;
WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
 
temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
 
return 0;
}
 
static void post_xfer(struct i2c_adapter *i2c_adap)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
 
/* unmask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
 
temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
if (lock_state)
temp |= rec->mask_data_mask;
else
temp &= ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
}
222,6 → 209,22
WREG32(rec->en_data_reg, val);
}
 
static int pre_xfer(struct i2c_adapter *i2c_adap)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
 
radeon_i2c_do_lock(i2c, 1);
 
return 0;
}
 
static void post_xfer(struct i2c_adapter *i2c_adap)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
 
radeon_i2c_do_lock(i2c, 0);
}
 
/* hw i2c */
 
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
887,11 → 890,6
.functionality = radeon_hw_i2c_func,
};
 
static const struct i2c_algorithm radeon_atom_i2c_algo = {
.master_xfer = radeon_atom_hw_i2c_xfer,
.functionality = radeon_atom_hw_i2c_func,
};
 
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
900,17 → 898,12
struct radeon_i2c_chan *i2c;
int ret;
 
/* don't add the mm_i2c bus unless hw_i2c is enabled */
if (rec->mm_i2c && (radeon_hw_i2c == 0))
return NULL;
 
i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
if (i2c == NULL)
return NULL;
 
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
// i2c->adapter.owner = THIS_MODULE;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
if (rec->mm_i2c ||
927,18 → 920,6
// DRM_ERROR("Failed to register hw i2c %s\n", name);
// goto out_free;
// }
} else if (rec->hw_capable &&
radeon_hw_i2c &&
ASIC_IS_DCE3(rdev)) {
/* hw i2c using atom */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon i2c hw bus %s", name);
i2c->adapter.algo = &radeon_atom_i2c_algo;
// ret = i2c_add_adapter(&i2c->adapter);
// if (ret) {
// DRM_ERROR("Failed to register hw i2c %s\n", name);
// goto out_free;
// }
} else {
/* set the radeon bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
950,8 → 931,10
i2c->algo.bit.setscl = set_clock;
i2c->algo.bit.getsda = get_data;
i2c->algo.bit.getscl = get_clock;
i2c->algo.bit.udelay = 10;
i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */
i2c->algo.bit.udelay = 20;
/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
* make this, 2 jiffies is a lot more reliable */
i2c->algo.bit.timeout = 2;
i2c->algo.bit.data = i2c;
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
979,7 → 962,7
return NULL;
 
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
// i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->dev = dev;
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
/drivers/video/drm/radeon/radeon_irq_kms.c
25,93 → 25,55
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
 
#define RADEON_WAIT_IDLE_TIMEOUT 200
 
struct radeon_device *main_device;
 
extern int irq_override;
 
 
/**
* radeon_driver_irq_handler_kms - irq handler for KMS
*
* @DRM_IRQ_ARGS: args
*
* This is the irq handler for the radeon KMS driver (all asics).
* radeon_irq_process is a macro that points to the per-asic
* irq handler callback.
*/
void irq_handler_kms()
{
// dbgprintf("%s\n",__FUNCTION__);
radeon_irq_process(main_device);
}
 
/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
*
* @dev: drm dev pointer
*
* Gets the hw ready to enable irqs (all asics).
* This function disables all interrupt sources on the GPU.
*/
void radeon_irq_preinstall_kms(struct radeon_device *rdev)
 
static void radeon_irq_preinstall(struct radeon_device *rdev)
{
unsigned long irqflags;
unsigned i;
 
spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.sw_int = false;
rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
rdev->irq.pflip[i] = false;
}
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/* Clear bits */
radeon_irq_process(rdev);
}
 
/**
* radeon_driver_irq_postinstall_kms - drm irq preinstall callback
*
* @dev: drm dev pointer
*
* Handles stuff to be done after enabling irqs (all asics).
* Returns 0 on success.
*/
 
int radeon_driver_irq_postinstall_kms(struct radeon_device *rdev)
int radeon_driver_irq_postinstall(struct radeon_device *rdev)
{
// struct radeon_device *rdev = dev->dev_private;
 
// dev->max_vblank_count = 0x001fffff;
 
rdev->irq.sw_int = true;
radeon_irq_set(rdev);
return 0;
}
 
/**
* radeon_irq_kms_init - init driver interrupt info
*
* @rdev: radeon device pointer
*
* Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
* Returns 0 for success, error for failure.
*/
int radeon_irq_kms_init(struct radeon_device *rdev)
{
int i;
int irq_line;
int r = 0;
 
118,27 → 80,20
ENTER();
 
// INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
// INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
 
spin_lock_init(&rdev->irq.lock);
spin_lock_init(&rdev->irq.sw_lock);
for (i = 0; i < rdev->num_crtc; i++)
spin_lock_init(&rdev->irq.pflip_lock[i]);
// r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
// if (r) {
// return r;
// }
/* enable msi */
 
rdev->msi_enabled = 0;
 
// if (radeon_msi_ok(rdev)) {
// int ret = pci_enable_msi(rdev->pdev);
// if (!ret) {
// rdev->msi_enabled = 1;
// dev_info(rdev->dev, "radeon: using MSI.\n");
// }
// }
rdev->irq.installed = true;
main_device = rdev;
 
radeon_irq_preinstall_kms(rdev);
radeon_irq_preinstall(rdev);
 
if (irq_override)
irq_line = irq_override;
151,81 → 106,41
 
// r = drm_irq_install(rdev->ddev);
 
r = radeon_driver_irq_postinstall_kms(rdev);
r = radeon_driver_irq_postinstall(rdev);
if (r) {
rdev->irq.installed = false;
LEAVE();
return r;
}
 
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
 
/**
* radeon_irq_kms_fini - tear down driver interrrupt info
*
* @rdev: radeon device pointer
*
* Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
*/
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
// drm_vblank_cleanup(rdev->ddev);
if (rdev->irq.installed) {
// drm_irq_uninstall(rdev->ddev);
rdev->irq.installed = false;
// if (rdev->msi_enabled)
// pci_disable_msi(rdev->pdev);
}
// flush_work(&rdev->hotplug_work);
}
 
/**
* radeon_irq_kms_sw_irq_get - enable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to enable
*
* Enables the software interrupt for a specific ring (all asics).
* The software interrupt is generally used to signal a fence on
* a particular ring.
*/
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
{
unsigned long irqflags;
 
if (!rdev->ddev->irq_enabled)
return;
 
if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
rdev->irq.sw_int = true;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
 
/**
* radeon_irq_kms_sw_irq_put - disable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to disable
*
* Disables the software interrupt for a specific ring (all asics).
* The software interrupt is generally used to signal a fence on
* a particular ring.
*/
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
{
unsigned long irqflags;
 
if (!rdev->ddev->irq_enabled)
return;
 
if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
rdev->irq.sw_int = false;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
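The get/put pair above is a refcount-guarded hardware enable: the first get switches the software interrupt on, the last put switches it off, and the spinlock (or the atomic counter, in the newer revision) keeps the transition race-free. A minimal single-threaded sketch of the same pattern, with illustrative names and the locking omitted:

#include <assert.h>
#include <stdio.h>

static int sw_refcount;
static int sw_int_enabled;

static void sw_irq_get(void)
{
    if (++sw_refcount == 1) {
        sw_int_enabled = 1;     /* first user: enable in hardware */
        puts("sw_int enabled");
    }
}

static void sw_irq_put(void)
{
    assert(sw_refcount > 0);    /* mirrors the driver's BUG_ON */
    if (--sw_refcount == 0) {
        sw_int_enabled = 0;     /* last user gone: disable again */
        puts("sw_int disabled");
    }
}

int main(void)
{
    sw_irq_get();   /* enables */
    sw_irq_get();   /* refcount only */
    sw_irq_put();   /* refcount only */
    sw_irq_put();   /* disables */
    return 0;
}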
 
 
/drivers/video/drm/radeon/radeon_mode.h
30,11 → 30,12
#ifndef RADEON_MODE_H
#define RADEON_MODE_H
 
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
#include <drm_crtc.h>
#include <drm_mode.h>
#include <drm_edid.h>
#include <drm_dp_helper.h>
#include <drm_fixed.h>
#include <drm_crtc_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
 
209,7 → 210,6
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
CT_SAM440EP
};
 
enum radeon_dvo_chip {
219,13 → 219,6
 
struct radeon_fbdev;
 
struct radeon_afmt {
bool enabled;
int offset;
bool last_buffer_filled_status;
int id;
};
 
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
232,7 → 225,6
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[6];
struct radeon_afmt *afmt[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
251,23 → 243,8
 
/* pointer to fbdev info structure */
struct radeon_fbdev *rfbdev;
/* firmware flags */
u16 firmware_flags;
/* pointer to backlight encoder */
struct radeon_encoder *bl_encoder;
};
 
#define RADEON_MAX_BL_LEVEL 0xFF
 
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
struct radeon_backlight_privdata {
struct radeon_encoder *encoder;
uint8_t negative;
};
 
#endif
 
#define MAX_H_CODE_TIMING_LEN 32
#define MAX_V_CODE_TIMING_LEN 32
 
283,18 → 260,6
uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
};
 
struct radeon_atom_ss {
uint16_t percentage;
uint8_t type;
uint16_t step;
uint8_t delay;
uint8_t range;
uint8_t refdiv;
/* asic_ss */
uint16_t rate;
uint16_t amount;
};
 
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
301,7 → 266,6
u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
bool in_mode_set;
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
317,16 → 281,6
struct drm_display_mode native_mode;
int pll_id;
int deferred_flip_completion;
/* pll sharing */
struct radeon_atom_ss ss;
bool ss_enabled;
u32 adjusted_clock;
int bpc;
u32 pll_reference_div;
u32 pll_post_div;
u32 pll_flags;
struct drm_encoder *encoder;
struct drm_connector *connector;
};
 
struct radeon_encoder_primary_dac {
380,6 → 334,18
};
 
/* spread spectrum */
struct radeon_atom_ss {
uint16_t percentage;
uint8_t type;
uint16_t step;
uint8_t delay;
uint8_t range;
uint8_t refdiv;
/* asic_ss */
uint16_t rate;
uint16_t amount;
};
 
struct radeon_encoder_atom_dig {
bool linkb;
/* atom dig */
394,8 → 360,6
struct backlight_device *bl_dev;
int dpms_mode;
uint8_t backlight_level;
int panel_mode;
struct radeon_afmt *afmt;
};
 
struct radeon_encoder_atom_dac {
417,6 → 381,10
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
int hdmi_offset;
int hdmi_config_offset;
int hdmi_audio_workaround;
int hdmi_buffer_status;
bool is_ext_encoder;
u16 caps;
};
468,6 → 436,9
struct radeon_i2c_chan *ddc_bus;
/* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
/* for some Radeon chip families we apply an additional EDID header
check as part of the DDC probe */
bool requires_extended_probe;
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
474,7 → 445,6
struct edid *edid;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
uint16_t connector_object_id;
struct radeon_hpd hpd;
struct radeon_router router;
486,8 → 456,6
struct drm_gem_object *obj;
};
 
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
 
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
496,37 → 464,28
 
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
extern struct drm_connector *
radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
u32 pixel_clock);
 
extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder);
extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
extern int radeon_get_monitor_bpc(struct drm_connector *connector);
 
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void radeon_dp_set_link_config(struct drm_connector *connector,
const struct drm_display_mode *mode);
struct drm_display_mode *mode);
extern void radeon_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);
 
556,7 → 515,8
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
bool requires_extended_probe);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
679,9 → 639,9
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
int radeon_framebuffer_init(struct drm_device *dev,
void radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj);
 
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
701,7 → 661,7
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
/drivers/video/drm/radeon/radeon_reg.h
56,7 → 56,6
#include "r600_reg.h"
#include "evergreen_reg.h"
#include "ni_reg.h"
#include "si_reg.h"
 
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
540,11 → 539,9
 
#define RADEON_CRTC2_PITCH 0x032c
#define RADEON_CRTC_STATUS 0x005c
# define RADEON_CRTC_VBLANK_CUR (1 << 0)
# define RADEON_CRTC_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC2_STATUS 0x03fc
# define RADEON_CRTC2_VBLANK_CUR (1 << 0)
# define RADEON_CRTC2_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c
/drivers/video/drm/radeon/rs600.c
35,7 → 35,7
* close to the one of the R600 family (R600 likely being an evolution
* of the RS600 GART block).
*/
#include <drm/drmP.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
43,35 → 43,9
 
#include "rs600_reg_safe.h"
 
static void rs600_gpu_init(struct radeon_device *rdev);
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
static const u32 crtc_offsets[2] =
{
0,
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
 
void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
int i;
 
if (crtc >= rdev->num_crtc)
return;
 
if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
break;
udelay(1);
}
}
}
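avivo_wait_for_vblank polls in two phases: first it waits out any vblank already in progress, then waits for the next one to begin, so the caller always observes a fresh vblank edge rather than the tail of the current period. A self-contained sketch of that structure, with a fake status register standing in for D1CRTC_STATUS:

#include <stdio.h>

#define V_BLANK 0x1

/* Fake register read: pretend vblank asserts every 16th poll. */
static unsigned read_status(void)
{
    static unsigned t;
    return (++t % 16) == 0 ? V_BLANK : 0;
}

static void wait_for_vblank(int usec_timeout)
{
    int i;
    /* phase 1: leave the current vblank, if we are inside one */
    for (i = 0; i < usec_timeout; i++)
        if (!(read_status() & V_BLANK))
            break;
    /* phase 2: wait for the next vblank to start */
    for (i = 0; i < usec_timeout; i++)
        if (read_status() & V_BLANK)
            break;
}

int main(void)
{
    wait_for_vblank(100);
    puts("fresh vblank edge observed");
    return 0;
}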
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
127,7 → 101,6
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned enable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
135,18 → 108,19
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
rdev->irq.hpd[1] = true;
break;
default:
break;
}
enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
// radeon_irq_kms_enable_hpd(rdev, enable);
if (rdev->irq.installed)
rs600_irq_set(rdev);
}
 
void rs600_hpd_fini(struct radeon_device *rdev)
153,7 → 127,6
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned disable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
161,19 → 134,29
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
rdev->irq.hpd[1] = false;
break;
default:
break;
}
disable |= 1 << radeon_connector->hpd.hpd;
}
// radeon_irq_kms_disable_hpd(rdev, disable);
}
 
void rs600_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
 
/* disable bus mastering */
tmp = PciRead16(rdev->pdev->bus, rdev->pdev->devfn, 0x4);
PciWrite16(rdev->pdev->bus, rdev->pdev->devfn, 0x4, tmp & 0xFFFB);
mdelay(1);
}
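rs600_bm_disable clears bit 2 of the PCI command register at config offset 0x4: 0xFFFB is the 16-bit complement of 0x0004, and bit 2 is the bus-master enable. A short worked example of the masking:

#include <stdio.h>

int main(void)
{
    unsigned short cmd = 0x0007;          /* I/O, memory and bus-master enabled */
    unsigned short masked = cmd & 0xFFFB; /* 0xFFFB == (unsigned short)~0x0004 */
    printf("0x%04x -> 0x%04x\n", cmd, masked); /* 0x0007 -> 0x0003 */
    return 0;
}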
 
int rs600_asic_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
197,8 → 180,7
WREG32(RADEON_CP_RB_CNTL, tmp);
// pci_save_state(rdev->pdev);
/* disable bus mastering */
// pci_clear_master(rdev->pdev);
mdelay(1);
rs600_bm_disable(rdev);
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
229,6 → 211,7
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
257,11 → 240,11
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}
 
static int rs600_gart_init(struct radeon_device *rdev)
int rs600_gart_init(struct radeon_device *rdev)
{
int r;
 
if (rdev->gart.robj) {
if (rdev->gart.table.vram.robj) {
WARN(1, "RS600 GART already initialized\n");
return 0;
}
279,7 → 262,7
u32 tmp;
int r, i;
 
if (rdev->gart.robj == NULL) {
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
332,25 → 315,30
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
rs600_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
static void rs600_gart_disable(struct radeon_device *rdev)
void rs600_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int r;
 
/* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
radeon_gart_table_vram_unpin(rdev);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (r == 0) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
}
 
static void rs600_gart_fini(struct radeon_device *rdev)
void rs600_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rs600_gart_disable(rdev);
365,7 → 353,7
 
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
385,12 → 373,6
~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
u32 hdmi0;
if (ASIC_IS_DCE2(rdev))
hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
else
hdmi0 = 0;
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
397,15 → 379,18
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1);
}
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
rdev->irq.pflip[0]) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.crtc_vblank_int[1] ||
atomic_read(&rdev->irq.pflip[1])) {
rdev->irq.pflip[1]) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
414,15 → 399,10
if (rdev->irq.hpd[1]) {
hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
}
if (rdev->irq.afmt[0]) {
hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
}
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
if (ASIC_IS_DCE2(rdev))
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
return 0;
}
 
432,6 → 412,12
uint32_t irq_mask = S_000044_SW_INT(1);
u32 tmp;
 
/* the interrupt works, but the status bit is permanently asserted */
if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
if (!rdev->irq.gui_idle_acked)
irq_mask |= S_000044_GUI_IDLE_STAT(1);
}
 
if (G_000044_DISPLAY_INT_STAT(irqs)) {
rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
456,17 → 442,6
rdev->irq.stat_regs.r500.disp_int = 0;
}
 
if (ASIC_IS_DCE2(rdev)) {
rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
}
} else
rdev->irq.stat_regs.r500.hdmi0_status = 0;
 
if (irqs) {
WREG32(R_000044_GEN_INT_STATUS, irqs);
}
475,9 → 450,6
 
void rs600_irq_disable(struct radeon_device *rdev)
{
u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
489,21 → 461,25
{
u32 status, msi_rearm;
bool queue_hotplug = false;
bool queue_hdmi = false;
 
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
 
status = rs600_irq_ack(rdev);
if (!status &&
!rdev->irq.stat_regs.r500.disp_int &&
!rdev->irq.stat_regs.r500.hdmi0_status) {
if (!status && !rdev->irq.stat_regs.r500.disp_int) {
return IRQ_NONE;
}
while (status ||
rdev->irq.stat_regs.r500.disp_int ||
rdev->irq.stat_regs.r500.hdmi0_status) {
while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
radeon_fence_process(rdev);
}
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
rdev->irq.gui_idle_acked = true;
rdev->pm.gui_idle = true;
// wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[0]) {
531,16 → 507,12
queue_hotplug = true;
DRM_DEBUG("HPD2\n");
}
if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
queue_hdmi = true;
DRM_DEBUG("HDMI0\n");
}
status = rs600_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
// if (queue_hotplug)
// schedule_work(&rdev->hotplug_work);
// if (queue_hdmi)
// schedule_work(&rdev->audio_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
551,7 → 523,9
WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
break;
default:
WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
WREG32(RADEON_MSI_REARM_EN, msi_rearm);
WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
break;
}
}
578,7 → 552,7
return -1;
}
 
static void rs600_gpu_init(struct radeon_device *rdev)
void rs600_gpu_init(struct radeon_device *rdev)
{
r420_pipes_init(rdev);
/* Wait for mc idle */
586,7 → 560,7
dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}
 
static void rs600_mc_init(struct radeon_device *rdev)
void rs600_mc_init(struct radeon_device *rdev)
{
u64 base;
 
648,7 → 622,7
WREG32(R_000074_MC_IND_DATA, v);
}
 
static void rs600_debugfs(struct radeon_device *rdev)
void rs600_debugfs(struct radeon_device *rdev)
{
if (r100_debugfs_rbbm_init(rdev))
DRM_ERROR("Failed to register debugfs file for RBBM !\n");
714,14 → 688,11
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
 
r = radeon_ib_pool_init(rdev);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
 
 
return 0;
}
 
783,7 → 754,6
if (r)
return r;
rs600_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
/drivers/video/drm/radeon/radeon_benchmark.c
26,81 → 26,33
#include "radeon_reg.h"
#include "radeon.h"
 
#define RADEON_BENCHMARK_COPY_BLIT 1
#define RADEON_BENCHMARK_COPY_DMA 0
 
#define RADEON_BENCHMARK_ITERATIONS 1024
#define RADEON_BENCHMARK_COMMON_MODES_N 17
 
static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
uint64_t saddr, uint64_t daddr,
int flag, int n)
unsigned int inline jiffies_to_msecs(const unsigned long j)
{
unsigned long start_jiffies;
unsigned long end_jiffies;
struct radeon_fence *fence = NULL;
int i, r;
return (10 * j);
};
 
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
r = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
&fence);
break;
case RADEON_BENCHMARK_COPY_BLIT:
r = radeon_copy_blit(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
&fence);
break;
default:
DRM_ERROR("Unknown copy method\n");
r = -EINVAL;
}
if (r)
goto exit_do_move;
r = radeon_fence_wait(fence, false);
if (r)
goto exit_do_move;
radeon_fence_unref(&fence);
}
end_jiffies = GetTimerTicks();
r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
exit_do_move:
if (fence)
radeon_fence_unref(&fence);
return r;
}
 
 
static void radeon_benchmark_log_results(int n, unsigned size,
unsigned int time,
unsigned sdomain, unsigned ddomain,
char *kind)
{
unsigned int throughput = (n * (size >> 10)) / time;
DRM_INFO("radeon: %s %u bo moves of %u kB from"
" %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
kind, n, size >> 10, sdomain, ddomain, time,
throughput * 8, throughput);
}
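radeon_benchmark_log_results computes throughput as (moves * size-in-KB) / time-in-ms; since KB/ms and MB/s differ only by the 1000-vs-1024 rounding the driver ignores, the quotient is reported directly as MB/s and, times eight, as Mb/s. The time itself comes through jiffies_to_msecs above, which assumes a 100 Hz tick (10 ms per jiffy). A worked example with assumed numbers:

#include <stdio.h>

int main(void)
{
    unsigned n = 1024;              /* RADEON_BENCHMARK_ITERATIONS */
    unsigned size = 1024 * 1024;    /* 1 MiB per move */
    unsigned time_ms = 512;         /* assumed total wall time */
    unsigned throughput = (n * (size >> 10)) / time_ms; /* KB/ms ~= MB/s */
    printf("%u MB/s, %u Mb/s\n", throughput, throughput * 8);
    /* prints: 2048 MB/s, 16384 Mb/s */
    return 0;
}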
 
static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
unsigned sdomain, unsigned ddomain)
{
struct radeon_bo *dobj = NULL;
struct radeon_bo *sobj = NULL;
struct radeon_fence *fence = NULL;
uint64_t saddr, daddr;
int r, n;
int time;
unsigned long start_jiffies;
unsigned long end_jiffies;
unsigned long time;
unsigned i, n, size;
int r;
 
 
ENTER();
 
n = RADEON_BENCHMARK_ITERATIONS;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
size = bsize;
n = 4; //1024;
 
dbgprintf("source domain %x\n", sdomain);
 
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
112,7 → 64,10
if (r) {
goto out_cleanup;
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
 
dbgprintf("destination domain %x\n", ddomain);
 
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
127,27 → 82,75
dbgprintf("done\n");
 
/* r100 doesn't have dma engine so skip the test */
/* also, VRAM-to-VRAM test doesn't make much sense for DMA */
/* skip it as well if domains are the same */
if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_DMA, n);
if (time < 0)
if (rdev->asic->copy_dma) {
 
dbgprintf("copy dma\n");
 
start_jiffies = GetTimerTicks();
for (i = 0; i < n; i++) {
r = radeon_fence_create(rdev, &fence);
if (r) {
goto out_cleanup;
if (time > 0)
radeon_benchmark_log_results(n, size, time,
sdomain, ddomain, "dma");
}
 
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_BLIT, n);
if (time < 0)
r = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE, fence);
 
if (r) {
goto out_cleanup;
if (time > 0)
radeon_benchmark_log_results(n, size, time,
sdomain, ddomain, "blit");
}
}
 
r = radeon_fence_wait(fence, false);
if (r) {
goto out_cleanup;
}
radeon_fence_unref(&fence);
 
end_jiffies = GetTimerTicks();
time = end_jiffies - start_jiffies;
time = jiffies_to_msecs(time);
if (time > 0) {
i = ((n * size) >> 10) / time;
printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
" %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
n, size >> 10,
sdomain, ddomain, time,
i, i * 1000, (i * 1000) / 1024);
}
}
 
start_jiffies = GetTimerTicks();
for (i = 0; i < n; i++) {
r = radeon_fence_create(rdev, &fence);
if (r) {
goto out_cleanup;
}
r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
goto out_cleanup;
}
}
 
r = radeon_fence_wait(fence, false);
if (r) {
goto out_cleanup;
}
radeon_fence_unref(&fence);
 
end_jiffies = GetTimerTicks();
time = end_jiffies - start_jiffies;
time = jiffies_to_msecs(time);
if (time > 0) {
i = ((n * size) >> 10) / time;
printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d"
" in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
}
out_cleanup:
 
dbgprintf("cleanup\n");
 
if (sobj) {
r = radeon_bo_reserve(sobj, false);
if (likely(r == 0)) {
164,9 → 167,11
}
radeon_bo_unref(&dobj);
}
 
if (fence) {
radeon_fence_unref(&fence);
}
if (r) {
DRM_ERROR("Error while benchmarking BO move.\n");
printk(KERN_WARNING "Error while benchmarking BO move.\n");
}
 
LEAVE();
173,86 → 178,12
 
}
 
void radeon_benchmark(struct radeon_device *rdev, int test_number)
void radeon_benchmark(struct radeon_device *rdev)
{
int i;
int common_modes[RADEON_BENCHMARK_COMMON_MODES_N] = {
640 * 480 * 4,
720 * 480 * 4,
800 * 600 * 4,
848 * 480 * 4,
1024 * 768 * 4,
1152 * 768 * 4,
1280 * 720 * 4,
1280 * 800 * 4,
1280 * 854 * 4,
1280 * 960 * 4,
1280 * 1024 * 4,
1440 * 900 * 4,
1400 * 1050 * 4,
1680 * 1050 * 4,
1600 * 1200 * 4,
1920 * 1080 * 4,
1920 * 1200 * 4
};
 
switch (test_number) {
case 1:
/* simple test, VRAM to GTT and GTT to VRAM */
radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
radeon_benchmark_move(rdev, 4096*4096, RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
radeon_benchmark_move(rdev, 4096*4096, RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
break;
case 2:
/* simple test, VRAM to VRAM */
radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
radeon_benchmark_move(rdev, 4096*4096, RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
break;
case 3:
/* GTT to VRAM, buffer size sweep, powers of 2 */
for (i = 1; i <= 16384; i <<= 1)
radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
break;
case 4:
/* VRAM to GTT, buffer size sweep, powers of 2 */
for (i = 1; i <= 16384; i <<= 1)
radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
break;
case 5:
/* VRAM to VRAM, buffer size sweep, powers of 2 */
for (i = 1; i <= 16384; i <<= 1)
radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
break;
case 6:
/* GTT to VRAM, buffer size sweep, common modes */
for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
break;
case 7:
/* VRAM to GTT, buffer size sweep, common modes */
for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
break;
case 8:
/* VRAM to VRAM, buffer size sweep, common modes */
for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
break;
 
default:
DRM_ERROR("Unknown benchmark\n");
}
}
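Tests 3-5 sweep buffer sizes in powers of two from one GPU page up to 16384 pages; with the usual 4 KiB page that spans 4 KiB through 64 MiB across 15 runs per domain pair. A quick enumeration of the sizes (the page size is an assumption here):

#include <stdio.h>

int main(void)
{
    const int page = 4096;          /* assumed RADEON_GPU_PAGE_SIZE */
    int i;
    for (i = 1; i <= 16384; i <<= 1)
        printf("%d KiB\n", (i * page) / 1024); /* 4 KiB ... 65536 KiB */
    return 0;
}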
/drivers/video/drm/radeon/radeon_object_kos.c
122,11 → 122,9
bo->reserved.counter = 1;
}
 
struct sg_table;
 
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct sg_table *sg, struct radeon_bo **bo_ptr)
struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
220,7 → 218,7
pagelist = &((u32_t*)page_tabs)[(u32_t)bo->kptr >> 12];
dbgprintf("pagelist %x\n", pagelist);
radeon_gart_bind(bo->rdev, bo->tbo.offset,
bo->tbo.vm_node->size, pagelist, NULL);
bo->tbo.vm_node->size, pagelist);
bo->tbo.offset += (u64)bo->rdev->mc.gtt_start;
}
else
/drivers/video/drm/radeon/evergreen_blit_kms.c
24,21 → 24,31
* Alex Deucher <alexander.deucher@amd.com>
*/
 
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"
 
#include "evergreend.h"
#include "evergreen_blit_shaders.h"
#include "cayman_blit_shaders.h"
#include "radeon_blit_common.h"
 
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2
 
#define FMT_8 0x1
#define FMT_5_6_5 0x8
#define FMT_8_8_8_8 0x1a
#define COLOR_8 0x1
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
 
/* emits 17 */
static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
 
46,29 → 56,27
if (h < 8)
h = 8;
 
cb_color_info = CB_FORMAT(format) |
CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, pitch);
radeon_ring_write(ring, slice);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, cb_color_info);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, pitch);
radeon_ring_write(rdev, slice);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, cb_color_info);
radeon_ring_write(rdev, (1 << 4));
radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
}
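set_render_target encodes the surface geometry in minus-one units: pitch counts 8-pixel groups across a row and slice counts 64-pixel tiles over the whole surface, which is why the dimensions are clamped to a minimum of 8 above. A worked example for an assumed 1024x768 target:

#include <stdio.h>

int main(void)
{
    int w = 1024, h = 768;
    int pitch = (w / 8) - 1;        /* width in 8-pixel units, minus-one encoded */
    int slice = ((w * h) / 64) - 1; /* surface size in 64-pixel tiles, minus-one */
    printf("pitch=%d slice=%d\n", pitch, slice); /* pitch=127 slice=12287 */
    return 0;
}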
 
/* emits 5dw */
77,7 → 85,6
u32 sync_type, u32 size,
u64 mc_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
 
if (size == 0xffffffff)
85,45 → 92,35
else
cp_coher_size = ((size + 255) >> 8);
 
if (rdev->family >= CHIP_CAYMAN) {
/* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
* to the RB directly. For IBs, the CP programs this as part of the
* surface_sync packet.
*/
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(rdev, sync_type);
radeon_ring_write(rdev, cp_coher_size);
radeon_ring_write(rdev, mc_addr >> 8);
radeon_ring_write(rdev, 10); /* poll interval */
}
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, sync_type);
radeon_ring_write(ring, cp_coher_size);
radeon_ring_write(ring, mc_addr >> 8);
radeon_ring_write(ring, 10); /* poll interval */
}
 
/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
 
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, 2);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, 2);
radeon_ring_write(rdev, 0);
 
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, 1);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 2);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, 1);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 2);
 
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
133,31 → 130,26
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
 
/* high addr, stride */
sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
SQ_VTXC_STRIDE(16);
sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
#ifdef __BIG_ENDIAN
sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
sq_vtx_constant_word2 |= (2 << 30);
#endif
/* xyzw swizzles */
sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
SQ_VTCX_SEL_Y(SQ_SEL_Y) |
SQ_VTCX_SEL_Z(SQ_SEL_Z) |
SQ_VTCX_SEL_W(SQ_SEL_W);
sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(ring, 0x580);
radeon_ring_write(ring, gpu_addr & 0xffffffff);
radeon_ring_write(ring, 48 - 1); /* size */
radeon_ring_write(ring, sq_vtx_constant_word2);
radeon_ring_write(ring, sq_vtx_constant_word3);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(rdev, 0x580);
radeon_ring_write(rdev, gpu_addr & 0xffffffff);
radeon_ring_write(rdev, 48 - 1); /* size */
radeon_ring_write(rdev, sq_vtx_constant_word2);
radeon_ring_write(rdev, sq_vtx_constant_word3);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
 
if ((rdev->family == CHIP_CEDAR) ||
(rdev->family == CHIP_PALM) ||
176,9 → 168,8
static void
set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_tex_resource_word0, sq_tex_resource_word1;
u32 sq_tex_resource_word4, sq_tex_resource_word7;
 
185,33 → 176,25
if (h < 1)
h = 1;
 
sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
sq_tex_resource_word0 = (1 << 0); /* 2D */
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
((w - 1) << 18));
sq_tex_resource_word1 = ((h - 1) << 0) |
TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
/* xyzw swizzles */
sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
TEX_DST_SEL_Y(SQ_SEL_Y) |
TEX_DST_SEL_Z(SQ_SEL_Z) |
TEX_DST_SEL_W(SQ_SEL_W);
sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
 
sq_tex_resource_word7 = format |
S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30);
 
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, sq_tex_resource_word0);
radeon_ring_write(ring, sq_tex_resource_word1);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, sq_tex_resource_word4);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, sq_tex_resource_word7);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, sq_tex_resource_word0);
radeon_ring_write(rdev, sq_tex_resource_word1);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, sq_tex_resource_word4);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, sq_tex_resource_word7);
}
 
/* emits 12 */
219,31 → 202,30
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
/* workaround some hw bugs */
if (x2 == 0)
x1 = 1;
if (y2 == 0)
y1 = 1;
if (rdev->family >= CHIP_CAYMAN) {
if (rdev->family == CHIP_CAYMAN) {
if ((x2 == 1) && (y2 == 1))
x2 = 2;
}
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}
 
/* emits 10 */
250,24 → 232,23
static void
draw_auto(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, DI_PT_RECTLIST);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, DI_PT_RECTLIST);
 
radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
 
radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(ring, 1);
radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(rdev, 1);
 
radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(ring, 3);
radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(rdev, 3);
radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
 
}
 
275,7 → 256,6
static void
set_default_state(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
289,8 → 269,8
int dwords;
 
/* set clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
 
if (rdev->family < CHIP_CAYMAN) {
switch (rdev->family) {
547,63 → 527,88
NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
 
/* disable dyn gprs */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, 0);
 
/* setup LDS */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0x10001000);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, 0x10001000);
 
/* SQ config */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, sq_config);
radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, sq_thread_resource_mgmt);
radeon_ring_write(ring, sq_thread_resource_mgmt_2);
radeon_ring_write(ring, sq_stack_resource_mgmt_1);
radeon_ring_write(ring, sq_stack_resource_mgmt_2);
radeon_ring_write(ring, sq_stack_resource_mgmt_3);
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, sq_config);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, sq_thread_resource_mgmt);
radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
}
 
/* CONTEXT_CONTROL */
radeon_ring_write(ring, 0xc0012800);
radeon_ring_write(ring, 0x80000000);
radeon_ring_write(ring, 0x80000000);
radeon_ring_write(rdev, 0xc0012800);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
 
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(ring, 0xc0026f00);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(rdev, 0xc0026f00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
 
/* SET_SAMPLER */
radeon_ring_write(ring, 0xc0036e00);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000012);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(rdev, 0xc0036e00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000012);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
 
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(rdev, 1);
 
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(ring, dwords);
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
 
}
 
static inline uint32_t i2f(uint32_t input)
{
u32 result, i, exponent, fraction;
 
if ((input & 0x3fff) == 0)
result = 0; /* 0 is a special case */
else {
exponent = 140; /* exponent biased by 127 */
fraction = (input & 0x3fff) << 10; /* cheat and only handle numbers below 2^15 */
for (i = 0; i < 14; i++) {
if (fraction & 0x800000)
break;
else {
fraction = fraction << 1; /* keep shifting left until top bit = 1 */
exponent = exponent - 1;
}
}
result = exponent << 23 | (fraction & 0x7fffff); /* mask off top bit; assumed 1 */
}
return result;
}
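i2f builds an IEEE-754 single by starting from a biased exponent of 140 (the 2^13 place of the initial top fraction bit) and normalizing: each left shift of the fraction drops the exponent by one until bit 23 is set, which then becomes the implicit leading 1. A worked trace for input = 2: fraction starts at 2 << 10 = 0x800, needs 12 shifts to reach 0x800000, so the exponent lands at 140 - 12 = 128 and the result is 128 << 23 = 0x40000000, i.e. 2.0f. A quick self-check:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* same algorithm as i2f above, valid for inputs below 2^14 */
static uint32_t i2f_check(uint32_t input)
{
    uint32_t i, exponent = 140, fraction = (input & 0x3fff) << 10;
    if ((input & 0x3fff) == 0)
        return 0;
    for (i = 0; i < 14 && !(fraction & 0x800000); i++) {
        fraction <<= 1;
        exponent--;
    }
    return exponent << 23 | (fraction & 0x7fffff);
}

int main(void)
{
    uint32_t bits = i2f_check(2);
    float f;
    memcpy(&f, &bits, sizeof(f));
    printf("0x%08x = %f\n", bits, f); /* 0x40000000 = 2.000000 */
    return 0;
}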
 
int evergreen_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
611,28 → 616,7
void *ptr;
u32 packet2s[16];
int num_packet2s = 0;
#if 0
rdev->r600_blit.primitives.set_render_target = set_render_target;
rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
rdev->r600_blit.primitives.set_shaders = set_shaders;
rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
rdev->r600_blit.primitives.set_scissors = set_scissors;
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
 
rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
 
rdev->r600_blit.ring_size_per_loop = 74;
if (rdev->family >= CHIP_CAYMAN)
rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
 
rdev->r600_blit.max_dim = 16384;
 
/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;
726,8 → 710,279
return r;
}
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
 
#endif
void evergreen_blit_fini(struct radeon_device *rdev)
{
int r;
 
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
if (rdev->r600_blit.shader_obj == NULL)
return;
/* If we can't reserve the bo, unref should be enough to destroy
* it when it becomes idle.
*/
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (!r) {
radeon_bo_unpin(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
 
static int evergreen_vb_ib_get(struct radeon_device *rdev)
{
int r;
r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
if (r) {
DRM_ERROR("failed to get IB for vertex buffer\n");
return r;
}
 
rdev->r600_blit.vb_total = 64*1024;
rdev->r600_blit.vb_used = 0;
return 0;
}
 
static void evergreen_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
 
int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
int r;
int ring_size, line_size;
int max_size;
/* loops of emits + fence emit possible */
int dwords_per_loop = 74, num_loops;
 
r = evergreen_vb_ib_get(rdev);
if (r)
return r;
 
/* 8 bpp vs 32 bpp for xfer unit */
if (size_bytes & 3)
line_size = 8192;
else
line_size = 8192 * 4;
 
max_size = 8192 * line_size;
 
/* major loops cover the max size transfer */
num_loops = ((size_bytes + max_size) / max_size);
/* minor loops cover the extra non aligned bits */
num_loops += ((size_bytes % line_size) ? 1 : 0);
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 55; /* shaders + def state */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 10; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
if (r)
return r;
 
set_default_state(rdev); /* 36 */
set_shaders(rdev); /* 16 */
return 0;
}
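evergreen_blit_prepare_copy sizes the ring conservatively: 74 dwords per copy rectangle, plus fixed overhead for default state, shaders, and fences. A worked example of the arithmetic, assuming a 4-byte-aligned 1 MiB copy:

#include <stdio.h>

int main(void)
{
    int size_bytes = 1024 * 1024;                 /* one aligned 1 MiB copy */
    int line_size = (size_bytes & 3) ? 8192 : 8192 * 4;
    int max_size = 8192 * line_size;              /* largest single transfer */
    int num_loops = (size_bytes + max_size) / max_size;
    int ring_size;
    num_loops += (size_bytes % line_size) ? 1 : 0;
    ring_size = num_loops * 74                    /* dwords per copy loop */
              + 55                                /* default state + shaders */
              + 10 + 5 + 10;                      /* VB fence, done, done fence */
    printf("num_loops=%d ring_size=%d dwords\n", num_loops, ring_size);
    /* prints: num_loops=1 ring_size=154 dwords */
    return 0;
}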
 
void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
int r;
 
if (rdev->r600_blit.vb_ib)
evergreen_vb_ib_put(rdev);
 
if (fence)
r = radeon_fence_emit(rdev, fence);
 
radeon_ring_unlock_commit(rdev);
}
 
void evergreen_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes)
{
int max_bytes;
u64 vb_gpu_addr;
u32 *vb;
 
DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
size_bytes, rdev->r600_blit.vb_used);
vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
max_bytes = 8192;
 
while (size_bytes) {
int cur_size = size_bytes;
int src_x = src_gpu_addr & 255;
int dst_x = dst_gpu_addr & 255;
int h = 1;
src_gpu_addr = src_gpu_addr & ~255ULL;
dst_gpu_addr = dst_gpu_addr & ~255ULL;
 
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
 
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
// WARN_ON(1);
}
 
vb[0] = i2f(dst_x);
vb[1] = 0;
vb[2] = i2f(src_x);
vb[3] = 0;
 
vb[4] = i2f(dst_x);
vb[5] = i2f(h);
vb[6] = i2f(src_x);
vb[7] = i2f(h);
 
vb[8] = i2f(dst_x + cur_size);
vb[9] = i2f(h);
vb[10] = i2f(src_x + cur_size);
vb[11] = i2f(h);
 
/* src 10 */
set_tex_resource(rdev, FMT_8,
src_x + cur_size, h, src_x + cur_size,
src_gpu_addr);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
 
 
/* dst 17 */
set_render_target(rdev, COLOR_8,
dst_x + cur_size, h,
dst_gpu_addr);
 
/* scissors 12 */
set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
 
/* 15 */
vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
set_vtx_resource(rdev, vb_gpu_addr);
 
/* draw 10 */
draw_auto(rdev);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
cur_size * h, dst_gpu_addr);
 
vb += 12;
rdev->r600_blit.vb_used += 12 * 4;
 
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
}
} else {
max_bytes = 8192 * 4;
 
while (size_bytes) {
int cur_size = size_bytes;
int src_x = (src_gpu_addr & 255);
int dst_x = (dst_gpu_addr & 255);
int h = 1;
src_gpu_addr = src_gpu_addr & ~255ULL;
dst_gpu_addr = dst_gpu_addr & ~255ULL;
 
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
 
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
// WARN_ON(1);
}
 
vb[0] = i2f(dst_x / 4);
vb[1] = 0;
vb[2] = i2f(src_x / 4);
vb[3] = 0;
 
vb[4] = i2f(dst_x / 4);
vb[5] = i2f(h);
vb[6] = i2f(src_x / 4);
vb[7] = i2f(h);
 
vb[8] = i2f((dst_x + cur_size) / 4);
vb[9] = i2f(h);
vb[10] = i2f((src_x + cur_size) / 4);
vb[11] = i2f(h);
 
/* src 10 */
set_tex_resource(rdev, FMT_8_8_8_8,
(src_x + cur_size) / 4,
h, (src_x + cur_size) / 4,
src_gpu_addr);
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
 
/* dst 17 */
set_render_target(rdev, COLOR_8_8_8_8,
(dst_x + cur_size) / 4, h,
dst_gpu_addr);
 
/* scissors 12 */
set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
 
/* Vertex buffer setup 15 */
vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
set_vtx_resource(rdev, vb_gpu_addr);
 
/* draw 10 */
draw_auto(rdev);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
cur_size * h, dst_gpu_addr);
 
/* 74 ring dwords per loop */
vb += 12;
rdev->r600_blit.vb_used += 12 * 4;
 
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
}
}
}
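The copy loop treats memory like rows of a texture: the low 8 bits of each address become an x offset, the 256-byte-aligned remainder becomes the base, and an aligned transfer moves up to 8192*4 bytes per row. When both offsets are zero the rectangle grows vertically instead of looping per row. A worked example of the geometry for an aligned 1 MiB copy (addresses are illustrative):

#include <stdio.h>

int main(void)
{
    unsigned long long src = 0x10000000ULL, dst = 0x20000000ULL;
    int size_bytes = 1024 * 1024;
    int max_bytes = 8192 * 4;            /* aligned path: 32 KiB per row */
    int src_x = src & 255, dst_x = dst & 255;
    int cur_size = size_bytes, h = 1;

    if (!src_x && !dst_x) {              /* both 256-byte aligned */
        h = cur_size / max_bytes;        /* 1048576 / 32768 = 32 rows */
        if (h > 8192) h = 8192;
        if (h == 0)   h = 1;
        else          cur_size = max_bytes;
    }
    printf("one rect: %d bytes x %d rows = %d bytes\n",
           cur_size, h, cur_size * h);   /* 32768 x 32 = 1048576 */
    return 0;
}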
 
/drivers/video/drm/radeon/r100.c
27,8 → 27,9
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
39,7 → 40,6
#include "atom.h"
 
#include <linux/firmware.h>
#include <linux/module.h>
 
#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"
64,57 → 64,12
 
/* This file gathers functions specific to:
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
* and others in some cases.
*/
 
/**
* r100_wait_for_vblank - vblank wait asic callback.
*
* @rdev: radeon_device pointer
* @crtc: crtc to wait for vblank on
*
* Wait for vblank on the requested crtc (r1xx-r4xx).
*/
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
int i;
 
if (crtc >= rdev->num_crtc)
return;
 
if (crtc == 0) {
if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
break;
udelay(1);
}
}
} else {
if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
break;
udelay(1);
}
}
}
}
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
int i;
 
/* Lock the graphics update lock */
/* update the scanout addresses */
121,11 → 76,7
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
/* Wait for update_pending to go high. */
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
break;
udelay(1);
}
while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
/* Unlock the lock, so double-buffering can take place inside vblank */
144,15 → 95,6
}
 
/* hpd for digital panel detect/disconnect */
/**
* r100_hpd_sense - hpd sense callback.
*
* @rdev: radeon_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Checks if a digital monitor is connected (r1xx-r4xx).
* Returns true if connected, false if not connected.
*/
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
172,14 → 114,6
return connected;
}
 
/**
* r100_hpd_set_polarity - hpd set polarity callback.
*
* @rdev: radeon_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Set the polarity of the hpd pin (r1xx-r4xx).
*/
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
208,48 → 142,47
}
}
 
/**
* r100_hpd_init - hpd setup callback.
*
* @rdev: radeon_device pointer
*
* Setup the hpd pins used by the card (r1xx-r4xx).
* Set the polarity, and enable the hpd interrupts.
*/
void r100_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned enable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
rdev->irq.hpd[1] = true;
break;
default:
break;
}
// radeon_irq_kms_enable_hpd(rdev, enable);
}
if (rdev->irq.installed)
r100_irq_set(rdev);
}
 
/**
* r100_hpd_fini - hpd tear down callback.
*
* @rdev: radeon_device pointer
*
* Tear down the hpd pins used by the card (r1xx-r4xx).
* Disable the hpd interrupts.
*/
void r100_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned disable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
disable |= 1 << radeon_connector->hpd.hpd;
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
rdev->irq.hpd[1] = false;
break;
default:
break;
}
// radeon_irq_kms_disable_hpd(rdev, disable);
}
}
 
/*
* PCI GART
266,7 → 199,7
{
int r;
 
if (rdev->gart.ptr) {
if (rdev->gart.table.ram.ptr) {
WARN(1, "R100 PCI GART already initialized\n");
return 0;
}
275,11 → 208,20
if (r)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
 
/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
void r100_enable_bm(struct radeon_device *rdev)
{
uint32_t tmp;
/* Enable bus mastering */
tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
}
 
int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
296,9 → 238,6
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
WREG32(RADEON_AIC_CNTL, tmp);
r100_pci_gart_tlb_flush(rdev);
DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
316,12 → 255,10
 
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
u32 *gtt = rdev->gart.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
gtt[i] = cpu_to_le32(lower_32_bits(addr));
rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
return 0;
}
 
341,15 → 278,18
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.gui_idle) {
tmp |= RADEON_GUI_IDLE_MASK;
}
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
rdev->irq.pflip[0]) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
atomic_read(&rdev->irq.pflip[1])) {
rdev->irq.pflip[1]) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
if (rdev->irq.hpd[0]) {
373,7 → 313,7
WREG32(R_000044_GEN_INT_STATUS, tmp);
}
 
static uint32_t r100_irq_ack(struct radeon_device *rdev)
static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
uint32_t irq_mask = RADEON_SW_INT_TEST |
380,6 → 320,12
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
/* the interrupt works, but the status bit is permanently asserted */
if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
if (!rdev->irq.gui_idle_acked)
irq_mask |= RADEON_GUI_IDLE_STAT;
}
 
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
}
391,6 → 337,9
uint32_t status, msi_rearm;
bool queue_hotplug = false;
 
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
 
status = r100_irq_ack(rdev);
if (!status) {
return IRQ_NONE;
401,8 → 350,14
while (status) {
/* SW interrupt */
if (status & RADEON_SW_INT_TEST) {
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
radeon_fence_process(rdev);
}
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
rdev->irq.gui_idle_acked = true;
rdev->pm.gui_idle = true;
// wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[0]) {
432,6 → 387,8
}
status = r100_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
// if (queue_hotplug)
// schedule_work(&rdev->hotplug_work);
if (rdev->msi_enabled) {
443,7 → 400,9
WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
break;
default:
WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
WREG32(RADEON_MSI_REARM_EN, msi_rearm);
WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
break;
}
}
463,47 → 422,35
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
 
/* We have to make sure that caches are flushed before the
* CPU might read something from VRAM. */
radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
 
void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
/* Unused on older asics, since we don't have semaphores or multiple rings */
BUG();
}
 
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
unsigned num_pages,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t stride_bytes = PAGE_SIZE;
uint32_t pitch;
uint32_t stride_pixels;
unsigned ndw;
515,26 → 462,26
/* radeon pitch is /64 */
pitch = stride_bytes / 64;
stride_pixels = stride_bytes / 4;
num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
num_loops = DIV_ROUND_UP(num_pages, 8191);
 
/* Ask for enough room for blit + flush + fence */
ndw = 64 + (10 * num_loops);
r = radeon_ring_lock(rdev, ring, ndw);
r = radeon_ring_lock(rdev, ndw);
if (r) {
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
return -EINVAL;
}
while (num_gpu_pages > 0) {
cur_pages = num_gpu_pages;
while (num_pages > 0) {
cur_pages = num_pages;
if (cur_pages > 8191) {
cur_pages = 8191;
}
num_gpu_pages -= cur_pages;
num_pages -= cur_pages;
 
/* pages are in the Y direction - height;
   page width is in the X direction - width */
radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
radeon_ring_write(rdev,
RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_SRC_CLIPPING |
546,26 → 493,26
RADEON_DP_SRC_SOURCE_MEMORY |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
radeon_ring_write(ring, num_gpu_pages);
radeon_ring_write(ring, num_gpu_pages);
radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
radeon_ring_write(rdev, num_pages);
radeon_ring_write(rdev, num_pages);
radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
}
radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
r = radeon_fence_emit(rdev, fence);
}
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_unlock_commit(rdev);
return r;
}
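/* Worked example (editor's illustration, not driver code): BITBLT_MULTI
 * moves at most 8191 GPU pages per packet, so copying 20000 pages takes
 * DIV_ROUND_UP(20000, 8191) = 3 loops of 8191, 8191 and 3618 pages, and
 * the ring reservation above is ndw = 64 + 10 * 3 = 94 dwords.
 */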
 
584,21 → 531,21
return -1;
}
 
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
void r100_ring_start(struct radeon_device *rdev)
{
int r;
 
r = radeon_ring_lock(rdev, ring, 2);
r = radeon_ring_lock(rdev, 2);
if (r) {
return;
}
radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(rdev,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_unlock_commit(rdev);
}
 
 
699,7 → 646,6
 
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned rb_bufsz;
unsigned rb_blksz;
unsigned max_fetch;
725,9 → 671,7
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
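/* Example (editor's illustration): requesting a 1 MiB ring gives
 * rb_bufsz = drm_order(1048576 / 8) = 17 and
 * ring_size = (1 << 18) * 4 = 1048576 bytes again; a non-power-of-two
 * request would be rounded up to the next power of two by drm_order().
 */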
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
0, 0x7fffff, RADEON_CP_PACKET2);
r = radeon_ring_init(rdev, ring_size);
if (r) {
return r;
}
736,7 → 680,7
rb_blksz = 9;
/* cp will read 128bytes at a time (4 dwords) */
max_fetch = 1;
ring->align_mask = 16 - 1;
rdev->cp.align_mask = 16 - 1;
/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
pre_write_timer = 64;
/* Force CP_RB_WPTR write if written more than one time before the
766,13 → 710,12
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
 
/* Set ring address */
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
/* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
ring->wptr = 0;
WREG32(RADEON_CP_RB_WPTR, ring->wptr);
WREG32(RADEON_CP_RB_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
788,7 → 731,10
 
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
ring->rptr = RREG32(RADEON_CP_RB_RPTR);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
/* protect against crazy HW on resume */
rdev->cp.wptr &= rdev->cp.ptr_mask;
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
796,13 → 742,13
WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
radeon_ring_start(rdev);
r = radeon_ring_test(rdev);
if (r) {
DRM_ERROR("radeon: cp isn't working (%d).\n", r);
return r;
}
ring->ready = true;
rdev->cp.ready = true;
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
814,7 → 760,7
}
/* Disable ring */
r100_cp_disable(rdev);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
radeon_ring_fini(rdev);
DRM_INFO("radeon: cp finalized\n");
}
 
822,7 → 768,7
{
/* Disable ring */
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(R_000770_SCRATCH_UMSK, 0);
832,116 → 778,17
}
}
 
void r100_cp_commit(struct radeon_device *rdev)
{
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
(void)RREG32(RADEON_CP_RB_WPTR);
}
 
 
#if 0
/*
* CS functions
*/
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx,
unsigned reg)
{
int r;
u32 tile_flags = 0;
u32 tmp;
struct radeon_cs_reloc *reloc;
u32 value;
 
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
 
value = radeon_get_ib_value(p, idx);
tmp = value & 0x003fffff;
tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
 
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_DST_TILE_MACRO;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
r100_cs_dump_packet(p, pkt);
return -EINVAL;
}
tile_flags |= RADEON_DST_TILE_MICRO;
}
 
tmp |= tile_flags;
p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
} else
p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
return 0;
}
 
int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
int idx)
{
unsigned c, i;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
int r = 0;
volatile uint32_t *ib;
u32 idx_value;
 
ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
if (c > 16) {
DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return -EINVAL;
}
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
 
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize &= 0x7F;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = idx_value >> 24;
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].esize &= 0x7F;
}
return r;
}
 
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
994,7 → 841,7
unsigned i;
unsigned idx;
 
ib = p->ib.ptr;
ib = p->ib->ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++) {
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
1073,7 → 920,7
uint32_t header, h_idx, reg;
volatile uint32_t *ib;
 
ib = p->ib.ptr;
ib = p->ib->ptr;
 
/* parse the wait until */
r = r100_cs_packet_parse(p, &waitreloc, p->idx);
1252,7 → 1099,7
u32 tile_flags = 0;
u32 idx_value;
 
ib = p->ib.ptr;
ib = p->ib->ptr;
track = (struct r100_cs_track *)p->track;
 
idx_value = radeon_get_ib_value(p, idx);
1312,16 → 1159,6
r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_TXO_MICRO_TILE_X2;
 
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
} else
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
1393,7 → 1230,7
r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
 
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1402,8 → 1239,6
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
} else
ib[idx] = idx_value;
 
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
1608,7 → 1443,7
volatile uint32_t *ib;
int r;
 
ib = p->ib.ptr;
ib = p->ib->ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch (pkt->opcode) {
1727,8 → 1562,6
int r;
 
track = kzalloc(sizeof(*track), GFP_KERNEL);
if (!track)
return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
1767,401 → 1600,72
return 0;
}
 
static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
DRM_ERROR("pitch %d\n", t->pitch);
DRM_ERROR("use_pitch %d\n", t->use_pitch);
DRM_ERROR("width %d\n", t->width);
DRM_ERROR("width_11 %d\n", t->width_11);
DRM_ERROR("height %d\n", t->height);
DRM_ERROR("height_11 %d\n", t->height_11);
DRM_ERROR("num levels %d\n", t->num_levels);
DRM_ERROR("depth %d\n", t->txdepth);
DRM_ERROR("bpp %d\n", t->cpp);
DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
DRM_ERROR("compress format %d\n", t->compress_format);
}
#endif
 
static int r100_track_compress_size(int compress_format, int w, int h)
/*
* Global GPU functions
*/
void r100_errata(struct radeon_device *rdev)
{
int block_width, block_height, block_bytes;
int wblocks, hblocks;
int min_wblocks;
int sz;
rdev->pll_errata = 0;
 
block_width = 4;
block_height = 4;
 
switch (compress_format) {
case R100_TRACK_COMP_DXT1:
block_bytes = 8;
min_wblocks = 4;
break;
default:
case R100_TRACK_COMP_DXT35:
block_bytes = 16;
min_wblocks = 2;
break;
if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
}
 
hblocks = (h + block_height - 1) / block_height;
wblocks = (w + block_width - 1) / block_width;
if (wblocks < min_wblocks)
wblocks = min_wblocks;
sz = wblocks * hblocks * block_bytes;
return sz;
if (rdev->family == CHIP_RV100 ||
rdev->family == CHIP_RS100 ||
rdev->family == CHIP_RS200) {
rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
}
 
static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
struct radeon_bo *cube_robj;
unsigned long size;
unsigned compress_format = track->textures[idx].compress_format;
 
for (face = 0; face < 5; face++) {
cube_robj = track->textures[idx].cube_info[face].robj;
w = track->textures[idx].cube_info[face].width;
h = track->textures[idx].cube_info[face].height;
 
if (compress_format) {
size = r100_track_compress_size(compress_format, w, h);
} else
size = w * h;
size *= track->textures[idx].cpp;
 
size += track->textures[idx].cube_info[face].offset;
 
if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
}
return 0;
}
 
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
/* Wait for vertical sync on primary CRTC */
void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
{
struct radeon_bo *robj;
unsigned long size;
unsigned u, i, w, h, d;
int ret;
uint32_t crtc_gen_cntl, tmp;
int i;
 
for (u = 0; u < track->num_texture; u++) {
if (!track->textures[u].enabled)
continue;
if (track->textures[u].lookup_disable)
continue;
robj = track->textures[u].robj;
if (robj == NULL) {
DRM_ERROR("No texture bound to unit %u\n", u);
return -EINVAL;
crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
!(crtc_gen_cntl & RADEON_CRTC_EN)) {
return;
}
size = 0;
for (i = 0; i <= track->textures[u].num_levels; i++) {
if (track->textures[u].use_pitch) {
if (rdev->family < CHIP_R300)
w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
else
w = track->textures[u].pitch / (1 << i);
} else {
w = track->textures[u].width;
if (rdev->family >= CHIP_RV515)
w |= track->textures[u].width_11;
w = w / (1 << i);
if (track->textures[u].roundup_w)
w = roundup_pow_of_two(w);
/* Clear the CRTC_VBLANK_SAVE bit */
WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_CRTC_STATUS);
if (tmp & RADEON_CRTC_VBLANK_SAVE) {
return;
}
h = track->textures[u].height;
if (rdev->family >= CHIP_RV515)
h |= track->textures[u].height_11;
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
if (track->textures[u].tex_coord_type == 1) {
d = (1 << track->textures[u].txdepth) / (1 << i);
if (!d)
d = 1;
} else {
d = 1;
DRM_UDELAY(1);
}
if (track->textures[u].compress_format) {
 
size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
/* compressed textures are block based */
} else
size += w * h * d;
}
size *= track->textures[u].cpp;
 
switch (track->textures[u].tex_coord_type) {
case 0:
case 1:
break;
case 2:
if (track->separate_cube) {
ret = r100_cs_track_cube(rdev, track, u);
if (ret)
return ret;
} else
size *= 6;
break;
default:
DRM_ERROR("Invalid texture coordinate type %u for unit "
"%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
if (size > radeon_bo_size(robj)) {
DRM_ERROR("Texture of unit %u needs %lu bytes but is "
"%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
}
return 0;
}
 
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
/* Wait for vertical sync on secondary CRTC */
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
{
unsigned i;
unsigned long size;
unsigned prim_walk;
unsigned nverts;
unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
uint32_t crtc2_gen_cntl, tmp;
int i;
 
if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
!track->blend_read_enable)
num_cb = 0;
crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
!(crtc2_gen_cntl & RADEON_CRTC2_EN))
return;
 
for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
/* Clear the CRTC_VBLANK_SAVE bit */
WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_CRTC2_STATUS);
if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
return;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy);
return -EINVAL;
DRM_UDELAY(1);
}
}
track->cb_dirty = false;
 
if (track->zb_dirty && track->z_enabled) {
if (track->zb.robj == NULL) {
DRM_ERROR("[drm] No buffer for z buffer !\n");
return -EINVAL;
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer "
"(need %lu have %lu) !\n", size,
radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy);
return -EINVAL;
}
}
track->zb_dirty = false;
 
if (track->aa_dirty && track->aaresolve) {
if (track->aa.robj == NULL) {
DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
return -EINVAL;
}
/* I believe the format comes from colorbuffer0. */
size = track->aa.pitch * track->cb[0].cpp * track->maxy;
size += track->aa.offset;
if (size > radeon_bo_size(track->aa.robj)) {
DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->aa.robj));
DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
i, track->aa.pitch, track->cb[0].cpp,
track->aa.offset, track->maxy);
return -EINVAL;
}
}
track->aa_dirty = false;
 
prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
if (track->vap_vf_cntl & (1 << 14)) {
nverts = track->vap_alt_nverts;
} else {
nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
}
switch (prim_walk) {
case 1:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * track->max_indx * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
DRM_ERROR("Max indices %u\n", track->max_indx);
return -EINVAL;
}
}
break;
case 2:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * (nverts - 1) * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
return -EINVAL;
}
}
break;
case 3:
size = track->vtx_size * nverts;
if (size != track->immd_dwords) {
DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
track->immd_dwords, size);
DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
nverts, track->vtx_size);
return -EINVAL;
}
break;
default:
DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
prim_walk);
return -EINVAL;
}
 
if (track->tex_dirty) {
track->tex_dirty = false;
return r100_cs_track_texture_check(rdev, track);
}
return 0;
}
 
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
unsigned i, face;
 
track->cb_dirty = true;
track->zb_dirty = true;
track->tex_dirty = true;
track->aa_dirty = true;
 
if (rdev->family < CHIP_R300) {
track->num_cb = 1;
if (rdev->family <= CHIP_RS200)
track->num_texture = 3;
else
track->num_texture = 6;
track->maxy = 2048;
track->separate_cube = 1;
} else {
track->num_cb = 4;
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
track->aaresolve = false;
track->aa.robj = NULL;
}
 
for (i = 0; i < track->num_cb; i++) {
track->cb[i].robj = NULL;
track->cb[i].pitch = 8192;
track->cb[i].cpp = 16;
track->cb[i].offset = 0;
}
track->z_enabled = true;
track->zb.robj = NULL;
track->zb.pitch = 8192;
track->zb.cpp = 4;
track->zb.offset = 0;
track->vtx_size = 0x7F;
track->immd_dwords = 0xFFFFFFFFUL;
track->num_arrays = 11;
track->max_indx = 0x00FFFFFFUL;
for (i = 0; i < track->num_arrays; i++) {
track->arrays[i].robj = NULL;
track->arrays[i].esize = 0x7F;
}
for (i = 0; i < track->num_texture; i++) {
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
track->textures[i].pitch = 16536;
track->textures[i].width = 16536;
track->textures[i].height = 16536;
track->textures[i].width_11 = 1 << 11;
track->textures[i].height_11 = 1 << 11;
track->textures[i].num_levels = 12;
if (rdev->family <= CHIP_RS200) {
track->textures[i].tex_coord_type = 0;
track->textures[i].txdepth = 0;
} else {
track->textures[i].txdepth = 16;
track->textures[i].tex_coord_type = 1;
}
track->textures[i].cpp = 64;
track->textures[i].robj = NULL;
/* CS IB emission code makes sure texture units are disabled */
track->textures[i].enabled = false;
track->textures[i].lookup_disable = false;
track->textures[i].roundup_w = true;
track->textures[i].roundup_h = true;
if (track->separate_cube)
for (face = 0; face < 5; face++) {
track->textures[i].cube_info[face].robj = NULL;
track->textures[i].cube_info[face].width = 16536;
track->textures[i].cube_info[face].height = 16536;
track->textures[i].cube_info[face].offset = 0;
}
}
}
#endif
 
/*
* Global GPU functions
*/
static void r100_errata(struct radeon_device *rdev)
{
rdev->pll_errata = 0;
 
if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
}
 
if (rdev->family == CHIP_RV100 ||
rdev->family == CHIP_RS100 ||
rdev->family == CHIP_RS200) {
rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
}
}
 
static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
unsigned i;
uint32_t tmp;
 
2210,27 → 1714,79
return -1;
}
 
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = GetTimerTicks();
}
 
/**
* r100_gpu_cp_is_lockup() - check whether the CP is locked up by tracking its progress
* @rdev: radeon device structure
* @lockup: r100_gpu_lockup structure holding CP lockup tracking information
* @cp: radeon_cp structure holding CP information
*
* We don't need to initialize the lockup tracking information: either the
* CP rptr will have moved to a different value, or jiffies will have
* wrapped around, and both cases force (re)initialization of the tracking
* data.
*
* A possible false positive is when we are called again after a long while
* and last_cp_rptr happens to equal the current CP rptr; unlikely, but it
* can happen. A lockup is therefore only reported once the rptr has been
* observed unchanged across calls for 10 seconds or more, so the caller
* must invoke r100_gpu_cp_is_lockup repeatedly for a lockup to be
* reported; the fencing code should be cautious about that.
*
* The caller should write to the ring to force the CP to do something, so
* that we don't get a false positive when the CP simply has nothing to do.
*
**/
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
unsigned long cjiffies, elapsed;
 
cjiffies = GetTimerTicks();
if (!time_after(cjiffies, lockup->last_jiffies)) {
/* likely a wrap around */
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = GetTimerTicks();
return false;
}
if (cp->rptr != lockup->last_cp_rptr) {
/* CP is still working no lockup */
lockup->last_cp_rptr = cp->rptr;
lockup->last_jiffies = GetTimerTicks();
return false;
}
elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
if (elapsed >= 10000) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
/* give a chance to the GPU ... */
return false;
}
 
bool r100_gpu_is_lockup(struct radeon_device *rdev)
{
u32 rbbm_status;
int r;
 
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
radeon_ring_lockup_update(ring);
r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
 
/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
void r100_enable_bm(struct radeon_device *rdev)
{
uint32_t tmp;
/* Enable bus mastering */
tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
}
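/* Illustrative sketch (editor's addition, not driver code): a caller-side
 * poll built on r100_gpu_is_lockup(). The 100 ms interval and the done()
 * completion callback are assumptions; the point is that the check must be
 * called repeatedly while the rptr is stalled, since a single call cannot
 * observe the 10-second threshold on its own.
 */
static int example_poll_for_lockup(struct radeon_device *rdev,
				   bool (*done)(struct radeon_device *))
{
	while (!done(rdev)) {
		if (r100_gpu_is_lockup(rdev))
			return -1;	/* rptr stalled past the threshold */
		mdelay(100);		/* keep sampling inside the window */
	}
	return 0;
}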
 
void r100_bm_disable(struct radeon_device *rdev)
2246,7 → 1802,8
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
pci_clear_master(rdev->pdev);
tmp = PciRead16(rdev->pdev->bus, rdev->pdev->devfn, 0x4);
PciWrite16(rdev->pdev->bus, rdev->pdev->devfn, 0x4, tmp & 0xFFFB);
mdelay(1);
}
 
2299,6 → 1856,7
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
2521,7 → 2079,7
WREG32(RADEON_CONFIG_CNTL, temp);
}
 
static void r100_mc_init(struct radeon_device *rdev)
void r100_mc_init(struct radeon_device *rdev)
{
u64 base;
 
2555,7 → 2113,7
* or the chip could hang on a subsequent access
*/
if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
mdelay(5);
udelay(5000);
}
 
/* This function is required to workaround a hardware bug in some (all?)
2593,7 → 2151,7
r100_pll_errata_after_data(rdev);
}
 
static void r100_set_safe_registers(struct radeon_device *rdev)
void r100_set_safe_registers(struct radeon_device *rdev)
{
if (ASIC_IS_RN50(rdev)) {
rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2636,22 → 2194,21
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t rdp, wdp;
unsigned count, i, j;
 
radeon_ring_free_size(rdev, ring);
radeon_ring_free_size(rdev);
rdp = RREG32(RADEON_CP_RB_RPTR);
wdp = RREG32(RADEON_CP_RB_WPTR);
count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
for (j = 0; j <= count; j++) {
i = (rdp + j) & ring->ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
i = (rdp + j) & rdev->cp.ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
}
return 0;
}
3319,8 → 2876,384
}
}
 
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
#if 0
static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
DRM_ERROR("pitch %d\n", t->pitch);
DRM_ERROR("use_pitch %d\n", t->use_pitch);
DRM_ERROR("width %d\n", t->width);
DRM_ERROR("width_11 %d\n", t->width_11);
DRM_ERROR("height %d\n", t->height);
DRM_ERROR("height_11 %d\n", t->height_11);
DRM_ERROR("num levels %d\n", t->num_levels);
DRM_ERROR("depth %d\n", t->txdepth);
DRM_ERROR("bpp %d\n", t->cpp);
DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
DRM_ERROR("compress format %d\n", t->compress_format);
}
 
static int r100_track_compress_size(int compress_format, int w, int h)
{
int block_width, block_height, block_bytes;
int wblocks, hblocks;
int min_wblocks;
int sz;
 
block_width = 4;
block_height = 4;
 
switch (compress_format) {
case R100_TRACK_COMP_DXT1:
block_bytes = 8;
min_wblocks = 4;
break;
default:
case R100_TRACK_COMP_DXT35:
block_bytes = 16;
min_wblocks = 2;
break;
}
 
hblocks = (h + block_height - 1) / block_height;
wblocks = (w + block_width - 1) / block_width;
if (wblocks < min_wblocks)
wblocks = min_wblocks;
sz = wblocks * hblocks * block_bytes;
return sz;
}
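/* Worked example (editor's illustration): a 64x64 DXT1 texture gives
 * wblocks = hblocks = 64 / 4 = 16, so sz = 16 * 16 * 8 = 2048 bytes.
 * A 2x2 DXT1 mip still occupies min_wblocks * 1 * 8 = 32 bytes because
 * the width is padded to at least four blocks.
 */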
 
static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
struct radeon_bo *cube_robj;
unsigned long size;
unsigned compress_format = track->textures[idx].compress_format;
 
for (face = 0; face < 5; face++) {
cube_robj = track->textures[idx].cube_info[face].robj;
w = track->textures[idx].cube_info[face].width;
h = track->textures[idx].cube_info[face].height;
 
if (compress_format) {
size = r100_track_compress_size(compress_format, w, h);
} else
size = w * h;
size *= track->textures[idx].cpp;
 
size += track->textures[idx].cube_info[face].offset;
 
if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
}
return 0;
}
 
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
struct radeon_bo *robj;
unsigned long size;
unsigned u, i, w, h, d;
int ret;
 
for (u = 0; u < track->num_texture; u++) {
if (!track->textures[u].enabled)
continue;
if (track->textures[u].lookup_disable)
continue;
robj = track->textures[u].robj;
if (robj == NULL) {
DRM_ERROR("No texture bound to unit %u\n", u);
return -EINVAL;
}
size = 0;
for (i = 0; i <= track->textures[u].num_levels; i++) {
if (track->textures[u].use_pitch) {
if (rdev->family < CHIP_R300)
w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
else
w = track->textures[u].pitch / (1 << i);
} else {
w = track->textures[u].width;
if (rdev->family >= CHIP_RV515)
w |= track->textures[u].width_11;
w = w / (1 << i);
if (track->textures[u].roundup_w)
w = roundup_pow_of_two(w);
}
h = track->textures[u].height;
if (rdev->family >= CHIP_RV515)
h |= track->textures[u].height_11;
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
if (track->textures[u].tex_coord_type == 1) {
d = (1 << track->textures[u].txdepth) / (1 << i);
if (!d)
d = 1;
} else {
d = 1;
}
if (track->textures[u].compress_format) {
 
size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
/* compressed textures are block based */
} else
size += w * h * d;
}
size *= track->textures[u].cpp;
 
switch (track->textures[u].tex_coord_type) {
case 0:
case 1:
break;
case 2:
if (track->separate_cube) {
ret = r100_cs_track_cube(rdev, track, u);
if (ret)
return ret;
} else
size *= 6;
break;
default:
DRM_ERROR("Invalid texture coordinate type %u for unit "
"%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
if (size > radeon_bo_size(robj)) {
DRM_ERROR("Texture of unit %u needs %lu bytes but is "
"%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
}
return 0;
}
 
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i;
unsigned long size;
unsigned prim_walk;
unsigned nverts;
unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
 
if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
!track->blend_read_enable)
num_cb = 0;
 
for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy);
return -EINVAL;
}
}
track->cb_dirty = false;
 
if (track->zb_dirty && track->z_enabled) {
if (track->zb.robj == NULL) {
DRM_ERROR("[drm] No buffer for z buffer !\n");
return -EINVAL;
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer "
"(need %lu have %lu) !\n", size,
radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy);
return -EINVAL;
}
}
track->zb_dirty = false;
 
if (track->aa_dirty && track->aaresolve) {
if (track->aa.robj == NULL) {
DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
return -EINVAL;
}
/* I believe the format comes from colorbuffer0. */
size = track->aa.pitch * track->cb[0].cpp * track->maxy;
size += track->aa.offset;
if (size > radeon_bo_size(track->aa.robj)) {
DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->aa.robj));
DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
i, track->aa.pitch, track->cb[0].cpp,
track->aa.offset, track->maxy);
return -EINVAL;
}
}
track->aa_dirty = false;
 
prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
if (track->vap_vf_cntl & (1 << 14)) {
nverts = track->vap_alt_nverts;
} else {
nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
}
switch (prim_walk) {
case 1:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * track->max_indx * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
DRM_ERROR("Max indices %u\n", track->max_indx);
return -EINVAL;
}
}
break;
case 2:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * (nverts - 1) * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
return -EINVAL;
}
}
break;
case 3:
size = track->vtx_size * nverts;
if (size != track->immd_dwords) {
DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
track->immd_dwords, size);
DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
nverts, track->vtx_size);
return -EINVAL;
}
break;
default:
DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
prim_walk);
return -EINVAL;
}
 
if (track->tex_dirty) {
track->tex_dirty = false;
return r100_cs_track_texture_check(rdev, track);
}
return 0;
}
 
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i, face;
 
track->cb_dirty = true;
track->zb_dirty = true;
track->tex_dirty = true;
track->aa_dirty = true;
 
if (rdev->family < CHIP_R300) {
track->num_cb = 1;
if (rdev->family <= CHIP_RS200)
track->num_texture = 3;
else
track->num_texture = 6;
track->maxy = 2048;
track->separate_cube = 1;
} else {
track->num_cb = 4;
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
track->aaresolve = false;
track->aa.robj = NULL;
}
 
for (i = 0; i < track->num_cb; i++) {
track->cb[i].robj = NULL;
track->cb[i].pitch = 8192;
track->cb[i].cpp = 16;
track->cb[i].offset = 0;
}
track->z_enabled = true;
track->zb.robj = NULL;
track->zb.pitch = 8192;
track->zb.cpp = 4;
track->zb.offset = 0;
track->vtx_size = 0x7F;
track->immd_dwords = 0xFFFFFFFFUL;
track->num_arrays = 11;
track->max_indx = 0x00FFFFFFUL;
for (i = 0; i < track->num_arrays; i++) {
track->arrays[i].robj = NULL;
track->arrays[i].esize = 0x7F;
}
for (i = 0; i < track->num_texture; i++) {
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
track->textures[i].pitch = 16536;
track->textures[i].width = 16536;
track->textures[i].height = 16536;
track->textures[i].width_11 = 1 << 11;
track->textures[i].height_11 = 1 << 11;
track->textures[i].num_levels = 12;
if (rdev->family <= CHIP_RS200) {
track->textures[i].tex_coord_type = 0;
track->textures[i].txdepth = 0;
} else {
track->textures[i].txdepth = 16;
track->textures[i].tex_coord_type = 1;
}
track->textures[i].cpp = 64;
track->textures[i].robj = NULL;
/* CS IB emission code makes sure texture units are disabled */
track->textures[i].enabled = false;
track->textures[i].lookup_disable = false;
track->textures[i].roundup_w = true;
track->textures[i].roundup_h = true;
if (track->separate_cube)
for (face = 0; face < 5; face++) {
track->textures[i].cube_info[face].robj = NULL;
track->textures[i].cube_info[face].width = 16536;
track->textures[i].cube_info[face].height = 16536;
track->textures[i].cube_info[face].offset = 0;
}
}
}
#endif
 
int r100_ring_test(struct radeon_device *rdev)
{
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
3332,15 → 3265,15
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ring_lock(rdev, ring, 2);
r = radeon_ring_lock(rdev, 2);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
radeon_scratch_free(rdev, scratch);
return r;
}
radeon_ring_write(ring, PACKET0(scratch, 0));
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_write(rdev, PACKET0(scratch, 0));
radeon_ring_write(rdev, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
3361,22 → 3294,14
 
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 
if (ring->rptr_save_reg) {
u32 next_rptr = ring->wptr + 2 + 3;
radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
radeon_ring_write(ring, next_rptr);
radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
radeon_ring_write(rdev, ib->gpu_addr);
radeon_ring_write(rdev, ib->length_dw);
}
 
radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
radeon_ring_write(ring, ib->gpu_addr);
radeon_ring_write(ring, ib->length_dw);
}
 
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
int r100_ib_test(struct radeon_device *rdev)
{
struct radeon_ib ib;
struct radeon_ib *ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
3388,29 → 3313,28
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
r = radeon_ib_get(rdev, &ib);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
goto free_scratch;
return r;
}
ib.ptr[0] = PACKET0(scratch, 0);
ib.ptr[1] = 0xDEADBEEF;
ib.ptr[2] = PACKET2(0);
ib.ptr[3] = PACKET2(0);
ib.ptr[4] = PACKET2(0);
ib.ptr[5] = PACKET2(0);
ib.ptr[6] = PACKET2(0);
ib.ptr[7] = PACKET2(0);
ib.length_dw = 8;
r = radeon_ib_schedule(rdev, &ib, NULL);
ib->ptr[0] = PACKET0(scratch, 0);
ib->ptr[1] = 0xDEADBEEF;
ib->ptr[2] = PACKET2(0);
ib->ptr[3] = PACKET2(0);
ib->ptr[4] = PACKET2(0);
ib->ptr[5] = PACKET2(0);
ib->ptr[6] = PACKET2(0);
ib->ptr[7] = PACKET2(0);
ib->length_dw = 8;
r = radeon_ib_schedule(rdev, ib);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
}
r = radeon_fence_wait(ib.fence, false);
r = radeon_fence_wait(ib->fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
goto free_ib;
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
3426,19 → 3350,41
scratch, tmp);
r = -EINVAL;
}
free_ib:
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
free_scratch:
radeon_scratch_free(rdev, scratch);
return r;
}
 
void r100_ib_fini(struct radeon_device *rdev)
{
radeon_ib_pool_fini(rdev);
}
 
int r100_ib_init(struct radeon_device *rdev)
{
int r;
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
r = r100_ib_test(rdev);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
return 0;
}
 
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Shut down the CP. We shouldn't need to do that, but better
* safe than sorry.
*/
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->cp.ready = false;
WREG32(R_000740_CP_CSQ_CNTL, 0);
 
/* Save few CRTC registers */
3508,6 → 3454,20
dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}
 
 
int drm_order(unsigned long size)
{
int order;
unsigned long tmp;
 
for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
 
if (size & (size - 1))
++order;
 
return order;
}
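/* Examples (editor's illustration): drm_order(4096) == 12 (exact power
 * of two), drm_order(4097) == 13 (rounded up), drm_order(1) == 0. The CP
 * init code earlier in this file uses it to round the requested ring
 * size to a power of two.
 */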
 
static void r100_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
3538,7 → 3498,7
r100_mc_resume(rdev, &save);
}
 
static void r100_clock_startup(struct radeon_device *rdev)
void r100_clock_startup(struct radeon_device *rdev)
{
u32 tmp;
 
3585,10 → 3545,9
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
 
r = radeon_ib_pool_init(rdev);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
3687,7 → 3646,6
return r;
}
r100_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
3699,43 → 3657,3
}
return 0;
}
 
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
}
}
 
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
if (reg < rdev->rmmio_size)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
}
}
 
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
return ioread32(rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
return ioread32(rdev->rio_mem + RADEON_MM_DATA);
}
}
 
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
if (reg < rdev->rio_mem_size)
iowrite32(v, rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
}
}
/drivers/video/drm/radeon/r200.c
25,8 → 25,9
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
86,10 → 87,9
int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
unsigned num_pages,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
uint32_t cur_size;
int i, num_loops;
96,16 → 96,16
int r = 0;
 
/* radeon pitch is /64 */
size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
size = num_pages << PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
r = radeon_ring_lock(rdev, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, (1 << 16));
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
112,19 → 112,19
cur_size = 0x1FFFFF;
}
size -= cur_size;
radeon_ring_write(ring, PACKET0(0x720, 2));
radeon_ring_write(ring, src_offset);
radeon_ring_write(ring, dst_offset);
radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
radeon_ring_write(rdev, PACKET0(0x720, 2));
radeon_ring_write(rdev, src_offset);
radeon_ring_write(rdev, dst_offset);
radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
r = radeon_fence_emit(rdev, fence);
}
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_unlock_commit(rdev);
return r;
}
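/* Worked example (editor's illustration): a 16 MiB copy is split into
 * DIV_ROUND_UP(16777216, 0x1FFFFF) = 9 DMA packets: eight full
 * 2097151-byte chunks plus one 8-byte remainder, each packet carrying
 * src, dst and size.
 */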
#if 0
156,7 → 156,7
u32 tile_flags = 0;
u32 idx_value;
 
ib = p->ib.ptr;
ib = p->ib->ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
217,16 → 217,6
r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R200_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R200_TXO_MICRO_TILE;
 
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
} else
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
289,7 → 279,6
return r;
}
 
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
298,8 → 287,6
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
} else
ib[idx] = idx_value;
 
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
/drivers/video/drm/radeon/r300.c
33,7 → 33,7
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "radeon_drm.h"
 
#include "r300d.h"
#include "rv350d.h"
74,7 → 74,7
 
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = rdev->gart.ptr;
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
93,7 → 93,7
{
int r;
 
if (rdev->gart.robj) {
if (rdev->gart.table.vram.robj) {
WARN(1, "RV370 PCIE GART already initialized\n");
return 0;
}
105,8 → 105,8
if (r)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
 
116,7 → 116,7
uint32_t tmp;
int r;
 
if (rdev->gart.robj == NULL) {
if (rdev->gart.table.vram.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
144,9 → 144,8
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
rv370_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)table_addr);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
(unsigned)(rdev->mc.gtt_size >> 20), table_addr);
rdev->gart.ready = true;
return 0;
}
154,6 → 153,7
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int r;
 
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
162,8 → 162,15
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
radeon_gart_table_vram_unpin(rdev);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
}
 
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
175,38 → 182,36
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
 
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today the callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(rdev, 0);
/* Flush 3D cache */
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_ZC_FLUSH);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE));
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
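 
/* For reference, a hedged sketch of the CP type-0 header that the PACKET0()
 * macro used above produces (illustrative layout, not the driver's own
 * definition): bits 31:30 select packet type 0, bits 29:16 carry the number
 * of data dwords minus one, bits 15:0 carry the register offset divided by
 * four.
 */
static inline uint32_t packet0_sketch(uint32_t reg, uint32_t ndw)
{
	/* ndw = number of data dwords following the header */
	return (0u << 30) | (((ndw - 1) & 0x3fff) << 16) | ((reg >> 2) & 0xffff);
}
/* e.g. packet0_sketch(RADEON_WAIT_UNTIL, 1) heads a single-register write */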
 
void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
void r300_ring_start(struct radeon_device *rdev)
{
unsigned gb_tile_config;
int r;
229,44 → 234,44
break;
}
 
r = radeon_ring_lock(rdev, ring, 64);
r = radeon_ring_lock(rdev, 64);
if (r) {
return;
}
radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(rdev,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
radeon_ring_write(ring, gb_tile_config);
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
radeon_ring_write(rdev, gb_tile_config);
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
radeon_ring_write(rdev,
((6 << R300_MS_X0_SHIFT) |
(6 << R300_MS_Y0_SHIFT) |
(6 << R300_MS_X1_SHIFT) |
275,8 → 280,8
(6 << R300_MS_Y2_SHIFT) |
(6 << R300_MSBD0_Y_SHIFT) |
(6 << R300_MSBD0_X_SHIFT)));
radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
radeon_ring_write(rdev,
((6 << R300_MS_X3_SHIFT) |
(6 << R300_MS_Y3_SHIFT) |
(6 << R300_MS_X4_SHIFT) |
284,19 → 289,19
(6 << R300_MS_X5_SHIFT) |
(6 << R300_MS_Y5_SHIFT) |
(6 << R300_MSBD1_SHIFT)));
radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
radeon_ring_write(rdev,
R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
radeon_ring_write(rdev,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_unlock_commit(rdev);
}
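 
/* Both sides of this diff follow the same ring discipline: reserve space
 * under the ring lock, write whole packets, then commit so the CP can fetch
 * them. A minimal usage sketch with the multi-ring API seen in this diff
 * (error handling trimmed):
 */
static void ring_pattern_sketch(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	if (radeon_ring_lock(rdev, ring, 2))	/* reserve two dwords */
		return;
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_unlock_commit(rdev, ring);	/* publish to the CP */
}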
 
static void r300_errata(struct radeon_device *rdev)
void r300_errata(struct radeon_device *rdev)
{
rdev->pll_errata = 0;
 
322,7 → 327,7
return -1;
}
 
static void r300_gpu_init(struct radeon_device *rdev)
void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
 
377,6 → 382,28
rdev->num_gb_pipes, rdev->num_z_pipes);
}
 
bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
u32 rbbm_status;
int r;
 
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
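 
/* The 0x80000000 dwords above are CP type-2 filler (NOP) packets: committing
 * them forces the CP to advance its read pointer if it is still alive, which
 * is what the lockup check then samples. A sketch of the encoding (the driver
 * defines its own PACKET2 macro):
 */
#define PACKET2_SKETCH(v)	((2u << 30) | ((v) & 0x3fffffff))	/* type 2 in bits 31:30, payload ignored */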
 
int r300_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
427,6 → 454,7
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
607,7 → 635,7
int r;
u32 idx_value;
 
ib = p->ib.ptr;
ib = p->ib->ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
 
683,10 → 711,6
return r;
}
 
if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
} else {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
697,7 → 721,6
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
ib[idx] = tmp;
}
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
747,7 → 770,6
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
766,7 → 788,6
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
}
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
832,7 → 853,6
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
851,7 → 871,7
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
}
 
track->zb.pitch = idx_value & 0x3FFC;
track->zb_dirty = true;
break;
1149,7 → 1169,7
unsigned idx;
int r;
 
ib = p->ib.ptr;
ib = p->ib->ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch(pkt->opcode) {
1389,13 → 1409,11
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
 
r = radeon_ib_pool_init(rdev);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
 
return 0;
}
 
1474,7 → 1492,6
return r;
}
r300_set_reg_safe(rdev);
 
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
/drivers/video/drm/radeon/r420.c
27,7 → 27,7
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
160,8 → 160,6
 
static void r420_cp_errata_init(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 
/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.
*
169,24 → 167,22
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(ring, rdev->config.r300.resync_scratch);
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_lock(rdev, 8);
radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
radeon_ring_write(rdev, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev);
}
 
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_lock(rdev, 8);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
 
229,21 → 225,44
return r;
}
r420_cp_errata_init(rdev);
 
r = radeon_ib_pool_init(rdev);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
 
return 0;
}
 
int r420_resume(struct radeon_device *rdev)
{
/* Make sure the GARTs are not working */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_disable(rdev);
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
}
/* check whether the card is posted */
if (rdev->is_atom_bios) {
atom_asic_init(rdev->mode_info.atom_context);
} else {
radeon_combios_asic_init(rdev->ddev);
}
/* Resume clock after posting */
r420_clock_resume(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return r420_startup(rdev);
}
 
 
 
 
 
int r420_init(struct radeon_device *rdev)
{
int r;
322,7 → 341,6
return r;
}
r420_set_reg_safe(rdev);
 
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
/drivers/video/drm/radeon/r520.c
25,7 → 25,7
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
33,7 → 33,7
 
/* This file gathers functions specific to: r520, rv530, rv560, rv570, r580 */
 
int r520_mc_wait_for_idle(struct radeon_device *rdev)
static int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
119,7 → 119,7
rdev->mc.vram_width *= 2;
}
 
static void r520_mc_init(struct radeon_device *rdev)
void r520_mc_init(struct radeon_device *rdev)
{
 
r520_vram_get_type(rdev);
131,7 → 131,7
radeon_update_bandwidth_info(rdev);
}
 
static void r520_mc_program(struct radeon_device *rdev)
void r520_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
 
196,13 → 196,11
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
 
r = radeon_ib_pool_init(rdev);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
 
return 0;
}
 
274,7 → 272,6
if (r)
return r;
rv515_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
/drivers/video/drm/radeon/r600_audio.c
29,28 → 29,7
#include "radeon_asic.h"
#include "atom.h"
 
/*
* check if enc_priv stores radeon_encoder_atom_dig
*/
static bool radeon_dig_encoder(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
return true;
}
return false;
}
#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
 
/*
* check if the chipset is supported
57,91 → 36,115
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
}
 
struct r600_audio r600_audio_status(struct radeon_device *rdev)
/*
* current number of channels
*/
int r600_audio_channels(struct radeon_device *rdev)
{
struct r600_audio status;
uint32_t value;
return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
}
 
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
/*
* current bits per sample
*/
int r600_audio_bits_per_sample(struct radeon_device *rdev)
{
uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
switch (value) {
case 0x0: return 8;
case 0x1: return 16;
case 0x2: return 20;
case 0x3: return 24;
case 0x4: return 32;
}
 
/* number of channels */
status.channels = (value & 0x7) + 1;
dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16 instead\n",
(int)value);
 
/* bits per sample */
switch ((value & 0xF0) >> 4) {
case 0x0:
status.bits_per_sample = 8;
break;
case 0x1:
status.bits_per_sample = 16;
break;
case 0x2:
status.bits_per_sample = 20;
break;
case 0x3:
status.bits_per_sample = 24;
break;
case 0x4:
status.bits_per_sample = 32;
break;
default:
dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
(int)value);
status.bits_per_sample = 16;
return 16;
}
 
/* current sampling rate in Hz */
/*
* current sampling rate in Hz
*/
int r600_audio_rate(struct radeon_device *rdev)
{
uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
uint32_t result;
 
if (value & 0x4000)
status.rate = 44100;
result = 44100;
else
status.rate = 48000;
status.rate *= ((value >> 11) & 0x7) + 1;
status.rate /= ((value >> 8) & 0x7) + 1;
result = 48000;
 
value = RREG32(R600_AUDIO_STATUS_BITS);
result *= ((value >> 11) & 0x7) + 1;
result /= ((value >> 8) & 0x7) + 1;
 
/* iec 60958 status bits */
status.status_bits = value & 0xff;
return result;
}
 
/* iec 60958 category code */
status.category_code = (value >> 8) & 0xff;
/*
* iec 60958 status bits
*/
uint8_t r600_audio_status_bits(struct radeon_device *rdev)
{
return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
}
 
return status;
/*
* iec 60958 category code
*/
uint8_t r600_audio_category_code(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
}
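 
/* A worked decode of the channel/bps/rate helpers above, assuming a
 * hypothetical R600_AUDIO_RATE_BPS_CHANNEL readback of 0x4011 (illustrative
 * fragment, not driver code):
 */
uint32_t value = 0x4011;			/* hypothetical readback */
int channels = (value & 0x7) + 1;		/* 1 + 1 -> 2 channels */
int bps = 16;					/* (value & 0xF0) >> 4 == 0x1 -> 16 bits */
int rate = ((value & 0x4000) ? 44100 : 48000)	/* bit 14 set -> 44.1 kHz base */
	 * (((value >> 11) & 0x7) + 1)		/* multiplier field 0 -> x1 */
	 / (((value >> 8) & 0x7) + 1);		/* divider field 0 -> /1, i.e. 44100 Hz */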
 
/*
* update all hdmi interfaces with current audio parameters
*/
void r600_audio_update_hdmi(struct work_struct *work)
static void r600_audio_update_hdmi(unsigned long param)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
audio_work);
struct radeon_device *rdev = (struct radeon_device *)param;
struct drm_device *dev = rdev->ddev;
struct r600_audio audio_status = r600_audio_status(rdev);
 
int channels = r600_audio_channels(rdev);
int rate = r600_audio_rate(rdev);
int bps = r600_audio_bits_per_sample(rdev);
uint8_t status_bits = r600_audio_status_bits(rdev);
uint8_t category_code = r600_audio_category_code(rdev);
 
struct drm_encoder *encoder;
bool changed = false;
int changes = 0, still_going = 0;
 
if (rdev->audio_status.channels != audio_status.channels ||
rdev->audio_status.rate != audio_status.rate ||
rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
rdev->audio_status.status_bits != audio_status.status_bits ||
rdev->audio_status.category_code != audio_status.category_code) {
rdev->audio_status = audio_status;
changed = true;
changes |= channels != rdev->audio_channels;
changes |= rate != rdev->audio_rate;
changes |= bps != rdev->audio_bits_per_sample;
changes |= status_bits != rdev->audio_status_bits;
changes |= category_code != rdev->audio_category_code;
 
if (changes) {
rdev->audio_channels = channels;
rdev->audio_rate = rate;
rdev->audio_bits_per_sample = bps;
rdev->audio_status_bits = status_bits;
rdev->audio_category_code = category_code;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (!radeon_dig_encoder(encoder))
continue;
if (changed || r600_hdmi_buffer_status_changed(encoder))
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
still_going |= radeon_encoder->audio_polling_active;
if (changes || r600_hdmi_buffer_status_changed(encoder))
r600_hdmi_update_audio_settings(encoder);
}
 
// mod_timer(&rdev->audio_timer,
// jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
}
 
/*
149,23 → 152,13
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
u32 value = 0;
DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
value |= 0x0e1000f0; /* fglrx sets that too */
}
WREG32(EVERGREEN_AUDIO_ENABLE, value);
} else {
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
rdev->audio_enabled = enable;
}
 
/*
* initialize the audio vars
* initialize the audio vars and register the update timer
*/
int r600_audio_init(struct radeon_device *rdev)
{
174,12 → 167,19
 
r600_audio_engine_enable(rdev, true);
 
rdev->audio_status.channels = -1;
rdev->audio_status.rate = -1;
rdev->audio_status.bits_per_sample = -1;
rdev->audio_status.status_bits = 0;
rdev->audio_status.category_code = 0;
rdev->audio_channels = -1;
rdev->audio_rate = -1;
rdev->audio_bits_per_sample = -1;
rdev->audio_status_bits = 0;
rdev->audio_category_code = 0;
 
// setup_timer(
// &rdev->audio_timer,
// r600_audio_update_hdmi,
// (unsigned long)rdev);
 
// mod_timer(&rdev->audio_timer, jiffies + 1);
 
return 0;
}
 
192,7 → 192,6
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
int base_rate = 48000;
 
switch (radeon_encoder->encoder_id) {
212,15 → 211,6
return;
}
 
if (ASIC_IS_DCE4(rdev)) {
/* TODO: other PLLs? */
WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
 
/* Select DTO source */
WREG32(0x5ac, radeon_crtc->crtc_id);
} else {
switch (dig->dig_encoder) {
case 0:
WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
234,13 → 224,11
WREG32(R600_AUDIO_CLK_SRCSEL, 1);
break;
default:
dev_err(rdev->dev,
"Unsupported DIG on encoder 0x%02X\n",
dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
}
}
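 
/* For reference, a worked example of the dig_encoder == 0 branch above with a
 * hypothetical clock argument of 74250 (numbers for illustration only; the
 * hardware derives the audio DTO from the MUL/DIV ratio):
 */
WREG32(R600_AUDIO_PLL1_MUL, 48000 * 50);	/* base_rate * 50 = 2400000 */
WREG32(R600_AUDIO_PLL1_DIV, 74250 * 100);	/* clock * 100    = 7425000 */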
 
/*
* release the audio timer
251,5 → 239,7
if (!rdev->audio_enabled)
return;
 
// del_timer(&rdev->audio_timer);
 
r600_audio_engine_enable(rdev, false);
}
/drivers/video/drm/radeon/r600_blit_shaders.c
314,10 → 314,6
0x00000000, /* VGT_VTX_CNT_EN */
 
0xc0016900,
0x000000d4,
0x00000000, /* SX_MISC */
 
0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
 
630,10 → 626,6
0x00000000, /* VGT_VTX_CNT_EN */
 
0xc0016900,
0x000000d4,
0x00000000, /* SX_MISC */
 
0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
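 
/* A hedged note on the raw dwords in these state arrays: each group is a CP
 * type-3 packet, e.g. the 0xc0016900 headers decode as type 3, two data
 * dwords (a register offset plus one value), opcode 0x69 (SET_CONTEXT_REG).
 * An illustrative header builder, not the driver's own macro:
 */
static uint32_t packet3_sketch(uint32_t opcode, uint32_t ndw)
{
	/* bits 31:30 = type 3, 29:16 = data dwords - 1, 15:8 = opcode */
	return (3u << 30) | (((ndw - 1) & 0x3fff) << 16) | ((opcode & 0xff) << 8);
}
/* packet3_sketch(0x69, 2) == 0xc0016900, matching the headers above */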
 
/drivers/video/drm/radeon/r600_blit_shaders.h
35,5 → 35,4
extern const u32 r6xx_ps_size, r6xx_vs_size;
extern const u32 r6xx_default_size, r7xx_default_size;
 
__pure uint32_t int2float(uint32_t x);
#endif
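 
/* The int2float() declaration removed above names the helper the blit code
 * uses to hand integer coordinates to the GPU as IEEE-754 floats without
 * touching the FPU (forbidden in kernel context). A hedged, self-contained
 * sketch of the idea; the driver keeps its own bit-twiddling implementation
 * next to the blit code:
 */
static uint32_t int2float_sketch(uint32_t x)
{
	uint32_t msb, fraction;

	if (!x)
		return 0;	/* zero is its own encoding */

	msb = 31;
	while (!(x & (1u << msb)))	/* locate the most significant set bit */
		msb--;

	/* mantissa: the 23 bits below the MSB (truncated when msb > 23) */
	if (msb > 23)
		fraction = (x >> (msb - 23)) & 0x7fffff;
	else
		fraction = (x << (23 - msb)) & 0x7fffff;

	return ((127 + msb) << 23) | fraction;	/* biased exponent | mantissa */
}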
/drivers/video/drm/radeon/radeon_bios.c
25,7 → 25,7
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
99,83 → 99,18
return true;
}
 
#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.
*/
/* retrieve the ROM in 4k blocks */
#define ATRM_BIOS_PAGE 4096
/**
* radeon_atrm_call - fetch a chunk of the vbios
*
* @atrm_handle: acpi ATRM handle
* @bios: vbios image pointer
* @offset: offset of vbios image data to fetch
* @len: length of vbios image data to fetch
*
* Executes ATRM to fetch a chunk of the discrete
* vbios image on PX systems (all asics).
* Returns the length of the buffer fetched.
*/
static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
int offset, int len)
{
acpi_status status;
union acpi_object atrm_arg_elements[2], *obj;
struct acpi_object_list atrm_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
 
atrm_arg.count = 2;
atrm_arg.pointer = &atrm_arg_elements[0];
 
atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[0].integer.value = offset;
 
atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[1].integer.value = len;
 
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) {
printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
 
obj = (union acpi_object *)buffer.pointer;
memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
len = obj->buffer.length;
kfree(buffer.pointer);
return len;
}
 
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
int ret;
int size = 256 * 1024;
int i;
struct pci_dev *pdev = NULL;
acpi_handle dhandle, atrm_handle;
acpi_status status;
bool found = false;
 
/* ATRM is for the discrete card only */
if (rdev->flags & RADEON_IS_IGP)
if (!radeon_atrm_supported(rdev->pdev))
return false;
 
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
 
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
if (!ACPI_FAILURE(status)) {
found = true;
break;
}
}
 
if (!found)
return false;
 
rdev->bios = kmalloc(size, GFP_KERNEL);
if (!rdev->bios) {
DRM_ERROR("Unable to allocate bios\n");
183,11 → 118,10
}
 
for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
ret = radeon_atrm_call(atrm_handle,
rdev->bios,
ret = radeon_atrm_get_bios_chunk(rdev->bios,
(i * ATRM_BIOS_PAGE),
ATRM_BIOS_PAGE);
if (ret < ATRM_BIOS_PAGE)
if (ret <= 0)
break;
}
 
197,12 → 131,6
}
return true;
}
#else
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
return false;
}
#endif
 
static bool ni_read_disabled_bios(struct radeon_device *rdev)
{
549,62 → 477,7
return legacy_read_disabled_bios(rdev);
}
 
#ifdef CONFIG_ACPI
static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
{
bool ret = false;
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
GOP_VBIOS_CONTENT *vbios;
VFCT_IMAGE_HEADER *vhdr;
 
if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
return false;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
goto out_unmap;
}
 
vfct = (UEFI_ACPI_VFCT *)hdr;
if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
goto out_unmap;
}
 
vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
vhdr = &vbios->VbiosHeader;
DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
 
if (vhdr->PCIBus != rdev->pdev->bus->number ||
vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
vhdr->VendorID != rdev->pdev->vendor ||
vhdr->DeviceID != rdev->pdev->device) {
DRM_INFO("ACPI VFCT table is not for this card\n");
goto out_unmap;
}
 
if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
DRM_ERROR("ACPI VFCT image truncated\n");
goto out_unmap;
}
 
rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
ret = !!rdev->bios;
 
out_unmap:
return ret;
}
#else
static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
{
return false;
}
#endif
 
bool radeon_get_bios(struct radeon_device *rdev)
{
bool r;
612,8 → 485,6
 
r = radeon_atrm_get_bios(rdev);
if (r == false)
r = radeon_acpi_vfct_bios(rdev);
if (r == false)
r = igp_read_bios_from_vram(rdev);
if (r == false)
r = radeon_read_bios(rdev);
/drivers/video/drm/radeon/radeon_fence.c
34,608 → 34,305
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
 
/*
* Fences
* Fences mark an event in the GPU's pipeline and are used
* for GPU/CPU synchronization. When the fence is written,
* it is expected that all buffers associated with that fence
* are no longer in use by the associated ring on the GPU and
* that the relevant GPU caches have been flushed. Whether
* we use a scratch register or memory location depends on the asic
* and whether writeback is enabled.
*/
 
/**
* radeon_fence_write - write a fence value
*
* @rdev: radeon_device pointer
* @seq: sequence number to write
* @ring: ring index the fence is associated with
*
* Writes a fence value to memory or a scratch register (all asics).
*/
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
{
struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
*drv->cpu_addr = cpu_to_le32(seq);
} else {
WREG32(drv->scratch_reg, seq);
if (rdev->wb.enabled) {
u32 scratch_index;
if (rdev->wb.use_event)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
} else
WREG32(rdev->fence_drv.scratch_reg, seq);
}
}
 
/**
* radeon_fence_read - read a fence value
*
* @rdev: radeon_device pointer
* @ring: ring index the fence is associated with
*
* Reads a fence value from memory or a scratch register (all asics).
* Returns the value of the fence read from memory or register.
*/
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
static u32 radeon_fence_read(struct radeon_device *rdev)
{
struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
u32 seq = 0;
u32 seq;
 
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
seq = le32_to_cpu(*drv->cpu_addr);
} else {
seq = RREG32(drv->scratch_reg);
}
if (rdev->wb.enabled) {
u32 scratch_index;
if (rdev->wb.use_event)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
return seq;
}
 
/**
* radeon_fence_emit - emit a fence on the requested ring
*
* @rdev: radeon_device pointer
* @fence: radeon fence object
* @ring: ring index the fence is associated with
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
int radeon_fence_emit(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
/* we are protected by the ring emission mutex */
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
radeon_fence_ring_emit(rdev, ring, *fence);
// trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
unsigned long irq_flags;
 
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (fence->emited) {
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
 
/**
* radeon_fence_process - process a fence
*
* @rdev: radeon_device pointer
* @ring: ring index the fence is associated with
*
* Checks the current fence value and wakes the fence queue
* if the sequence number has increased (all asics).
fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
if (!rdev->cp.ready)
/* FIXME: CP is not running, assume everything is done right
* away
*/
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
uint64_t seq, last_seq, last_emitted;
unsigned count_loop = 0;
bool wake = false;
radeon_fence_write(rdev, fence->seq);
else
radeon_fence_ring_emit(rdev, fence);
 
/* Note there is a scenario here for an infinite loop but it's
* very unlikely to happen. For it to happen, the current polling
* process needs to be interrupted by another process, and that
* other process needs to update last_seq between the atomic read
* and the xchg of the current process.
*
* Moreover, for this to become an infinite loop, new fences need
* to be signaled continuously, i.e. radeon_fence_read needs to
* return a different value each time for both the currently
* polling process and the other process that xchgs last_seq
* between the atomic read and xchg of the current process. And
* the value the other process sets as last_seq must be higher
* than the seq value we just read, which means that the current
* process needs to be interrupted after radeon_fence_read and
* before the atomic xchg.
*
* To be even safer, we count the number of times we loop and
* bail out after 10 iterations, accepting the fact that we might
* have temporarily set last_seq not to the true last seq but to
* an older one.
*/
last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
do {
last_emitted = rdev->fence_drv[ring].sync_seq[ring];
seq = radeon_fence_read(rdev, ring);
seq |= last_seq & 0xffffffff00000000LL;
if (seq < last_seq) {
seq &= 0xffffffff;
seq |= last_emitted & 0xffffffff00000000LL;
// trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emited = true;
list_move_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
 
if (seq <= last_seq || seq > last_emitted) {
break;
}
/* If we looped over, we don't want to return without
* checking if a fence is signaled, as it means that the
* seq we just read is different from the previous one.
*/
wake = true;
last_seq = seq;
if ((count_loop++) > 10) {
/* We looped over too many times; leave with the
* fact that we might have set an older fence
* seq than the current real last seq as signaled
* by the hw.
*/
break;
}
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 
if (wake) {
rdev->fence_drv[ring].last_activity = GetTimerTicks();
wake_up_all(&rdev->fence_queue);
}
}
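 
/* The polling loop above reconstructs a 64-bit sequence from the 32-bit value
 * the hardware actually stores. A standalone sketch of that arithmetic, valid
 * as long as fences never advance by more than 2^32 between two polls:
 */
static uint64_t seq_extend_sketch(uint32_t hw_seq, uint64_t last_seq,
				  uint64_t last_emitted)
{
	uint64_t seq = hw_seq | (last_seq & 0xffffffff00000000ULL);

	if (seq < last_seq) {
		/* the 32-bit counter wrapped: borrow the upper half
		 * from the newest emitted sequence instead */
		seq &= 0xffffffffULL;
		seq |= last_emitted & 0xffffffff00000000ULL;
	}
	return seq;
}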
 
/**
* radeon_fence_destroy - destroy a fence
*
* @kref: fence kref
*
* Frees the fence object (all asics).
*/
static void radeon_fence_destroy(struct kref *kref)
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
{
struct radeon_fence *fence;
struct list_head *i, *n;
uint32_t seq;
bool wake = false;
unsigned long cjiffies;
 
fence = container_of(kref, struct radeon_fence, kref);
kfree(fence);
seq = radeon_fence_read(rdev);
if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = GetTimerTicks();
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
} else {
cjiffies = GetTimerTicks();
if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
cjiffies -= rdev->fence_drv.last_jiffies;
if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
/* update the timeout */
rdev->fence_drv.last_timeout -= cjiffies;
} else {
/* the 500ms timeout has elapsed, we should test
* for GPU lockup
*/
rdev->fence_drv.last_timeout = 1;
}
 
/**
* radeon_fence_seq_signaled - check if a fence sequence number has signaled
*
* @rdev: radeon device pointer
* @seq: sequence number
* @ring: ring index the fence is associated with
*
* Check if the last signaled fence sequence number is >= the requested
* sequence number (all asics).
* Returns true if the fence has signaled (current fence value
* is >= requested value) or false if it has not (current fence
* value is < the requested value). Helper function for
* radeon_fence_signaled().
} else {
/* wrap around update last jiffies, we will just wait
* a little longer
*/
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
u64 seq, unsigned ring)
{
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
rdev->fence_drv.last_jiffies = cjiffies;
}
/* poll new last sequence at least once */
radeon_fence_process(rdev, ring);
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
}
return false;
}
 
/**
* radeon_fence_signaled - check if a fence has signaled
*
* @fence: radeon fence object
*
* Check if the requested fence has signaled (all asics).
* Returns true if the fence has signaled or false if it has not.
*/
bool radeon_fence_signaled(struct radeon_fence *fence)
{
if (!fence) {
return true;
n = NULL;
list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
if (fence->seq == seq) {
n = i;
break;
}
if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
return true;
}
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
fence->seq = RADEON_FENCE_SIGNALED_SEQ;
return true;
/* all fences previous to this one are considered signaled */
if (n) {
kevent_t event;
event.code = -1;
i = n;
do {
n = i->prev;
list_move_tail(i, &rdev->fence_drv.signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
// dbgprintf("fence %x done\n", fence);
RaiseEvent(fence->evnt, 0, &event);
i = n;
} while (i != &rdev->fence_drv.emited);
wake = true;
}
return false;
return wake;
}
 
/**
* radeon_fence_wait_seq - wait for a specific sequence number
*
* @rdev: radeon device pointer
* @target_seq: sequence number we want to wait for
* @ring: ring index the fence is associated with
* @intr: use interruptible sleep
* @lock_ring: whether the ring should be locked or not
*
* Wait for the requested sequence number to be written (all asics).
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the sequence number. Helper function
* for radeon_fence_wait(), et al.
* Returns 0 if the sequence number has passed, error for all other cases.
* -EDEADLK is returned when a GPU lockup has been detected and the ring is
* marked as not ready so no further jobs get scheduled until a successful
* reset.
*/
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
unsigned ring, bool intr, bool lock_ring)
 
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
{
unsigned long timeout, last_activity;
uint64_t seq;
unsigned i;
bool signaled;
int r;
unsigned long irq_flags;
 
while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
if (!rdev->ring[ring].ready) {
return -EBUSY;
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
 
timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
/* the normal case, timeout is somewhere before last_activity */
timeout = rdev->fence_drv[ring].last_activity - timeout;
} else {
/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
* anyway we will just wait for the minimum amount and then check for a lockup
*/
timeout = 1;
}
seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
/* Save the current last_activity value, used to check for GPU lockups */
last_activity = rdev->fence_drv[ring].last_activity;
(*fence)->evnt = CreateEvent(NULL, MANUAL_DESTROY);
// kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
(*fence)->emited = false;
(*fence)->signaled = false;
(*fence)->seq = 0;
INIT_LIST_HEAD(&(*fence)->list);
 
// trace_radeon_fence_wait_begin(rdev->ddev, seq);
radeon_irq_kms_sw_irq_get(rdev, ring);
// if (intr) {
// r = wait_event_interruptible_timeout(rdev->fence_queue,
// (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
// timeout);
// } else {
// r = wait_event_timeout(rdev->fence_queue,
// (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
// timeout);
// }
delay(1);
 
radeon_irq_kms_sw_irq_put(rdev, ring);
// if (unlikely(r < 0)) {
// return r;
// }
// trace_radeon_fence_wait_end(rdev->ddev, seq);
 
if (unlikely(!signaled)) {
/* we were interrupted for some reason and the fence
* isn't signaled yet, resume waiting */
if (r) {
continue;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
 
/* check if sequence value has changed since last_activity */
if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
continue;
}
 
if (lock_ring) {
mutex_lock(&rdev->ring_lock);
}
bool radeon_fence_signaled(struct radeon_fence *fence)
{
unsigned long irq_flags;
bool signaled = false;
 
/* test if somebody else has already decided that this is a lockup */
if (last_activity != rdev->fence_drv[ring].last_activity) {
if (lock_ring) {
mutex_unlock(&rdev->ring_lock);
}
continue;
}
if (!fence)
return true;
 
if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
/* good news we believe it's a lockup */
dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
target_seq, seq);
if (fence->rdev->gpu_lockup)
return true;
 
/* change last activity so nobody else thinks there is a lockup */
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
rdev->fence_drv[i].last_activity = jiffies;
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
signaled = fence->signaled;
/* if we are shutting down, report all fences as signaled */
if (fence->rdev->shutdown) {
signaled = true;
}
 
/* mark the ring as not ready any more */
rdev->ring[ring].ready = false;
if (lock_ring) {
mutex_unlock(&rdev->ring_lock);
if (!fence->emited) {
WARN(1, "Querying an unemitted fence: %p!\n", fence);
signaled = true;
}
return -EDEADLK;
if (!signaled) {
radeon_fence_poll_locked(fence->rdev);
signaled = fence->signaled;
}
 
if (lock_ring) {
mutex_unlock(&rdev->ring_lock);
write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
return signaled;
}
}
}
return 0;
}
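 
/* A hedged usage sketch of the per-ring fence API seen in this diff (emission
 * normally happens under the ring mutex; error handling trimmed):
 */
struct radeon_fence *fence;
int r;

r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r == 0) {
	r = radeon_fence_wait(fence, false);	/* false: non-interruptible sleep */
	radeon_fence_unref(&fence);
}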
 
/**
* radeon_fence_wait - wait for a fence to signal
*
* @fence: radeon fence object
* @intr: use interruptible sleep
*
* Wait for the requested fence to signal (all asics).
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the fence.
* Returns 0 if the fence has passed, error for all other cases.
*/
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
unsigned long irq_flags, timeout;
u32 seq;
int r;
 
if (fence == NULL) {
WARN(1, "Querying an invalid fence : %p !\n", fence);
return -EINVAL;
return 0;
}
 
r = radeon_fence_wait_seq(fence->rdev, fence->seq,
fence->ring, intr, true);
if (r) {
return r;
}
fence->seq = RADEON_FENCE_SIGNALED_SEQ;
rdev = fence->rdev;
if (radeon_fence_signaled(fence)) {
return 0;
}
timeout = rdev->fence_drv.last_timeout;
retry:
/* save current sequence used to check for GPU lockup */
seq = rdev->fence_drv.last_seq;
// trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
// r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
// radeon_fence_signaled(fence), timeout);
 
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
unsigned i;
WaitEvent(fence->evnt);
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
return true;
radeon_irq_kms_sw_irq_put(rdev);
if (unlikely(r < 0)) {
return r;
}
}
return false;
}
 
/**
* radeon_fence_wait_any_seq - wait for a sequence number on any ring
*
* @rdev: radeon device pointer
* @target_seq: sequence number(s) we want to wait for
* @intr: use interruptible sleep
*
* Wait for the requested sequence number(s) to be written by any ring
* (all asics). Sequence number array is indexed by ring id.
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the sequence number. Helper function
* for radeon_fence_wait_any(), et al.
* Returns 0 if the sequence number has passed, error for all other cases.
*/
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
u64 *target_seq, bool intr)
{
unsigned long timeout, last_activity, tmp;
unsigned i, ring = RADEON_NUM_RINGS;
bool signaled;
int r;
 
for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i]) {
continue;
}
 
/* use the most recent one as indicator */
if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
last_activity = rdev->fence_drv[i].last_activity;
}
 
/* For lockup detection just pick the lowest ring we are
* actively waiting for
*/
if (i < ring) {
ring = i;
}
}
 
/* nothing to wait for? */
if (ring == RADEON_NUM_RINGS) {
return -ENOENT;
}
 
while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
if (time_after(last_activity, timeout)) {
/* the normal case, timeout is somewhere before last_activity */
timeout = last_activity - timeout;
} else {
/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
* anyway we will just wait for the minimum amount and then check for a lockup
*/
timeout = 1;
}
radeon_irq_kms_sw_irq_get(rdev);
// r = wait_event_timeout(rdev->fence_drv.queue,
// radeon_fence_signaled(fence), timeout);
 
// trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (target_seq[i]) {
radeon_irq_kms_sw_irq_get(rdev, i);
}
}
WaitEvent(fence->evnt);
 
// WaitEvent(fence->evnt);
 
r = 1;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (target_seq[i]) {
radeon_irq_kms_sw_irq_put(rdev, i);
radeon_irq_kms_sw_irq_put(rdev);
}
}
if (unlikely(r < 0)) {
return r;
}
// trace_radeon_fence_wait_end(rdev->ddev, seq);
 
if (unlikely(!signaled)) {
/* we were interrupted for some reason and fence
* isn't signaled yet, resume waiting */
if (unlikely(!radeon_fence_signaled(fence))) {
/* we were interrupted for some reason and the fence
* isn't signaled yet, resume waiting
*/
if (r) {
continue;
timeout = r;
goto retry;
}
 
mutex_lock(&rdev->ring_lock);
for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
tmp = rdev->fence_drv[i].last_activity;
}
}
/* test if somebody else has already decided that this is a lockup */
if (last_activity != tmp) {
last_activity = tmp;
mutex_unlock(&rdev->ring_lock);
continue;
}
 
if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
/* don't protect read access to rdev->fence_drv.last_seq
* if we are experiencing a lockup the value doesn't change
*/
if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
/* good news we believe it's a lockup */
dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
target_seq[ring]);
WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
/* FIXME: what should we do? Marking everyone
* as signaled for now
*/
rdev->gpu_lockup = true;
// r = radeon_gpu_reset(rdev);
// if (r)
// return r;
return true;
 
/* change last activity so nobody else thinks there is a lockup */
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
rdev->fence_drv[i].last_activity = GetTimerTicks();
// radeon_fence_write(rdev, fence->seq);
// rdev->gpu_lockup = false;
}
 
/* mark the ring as not ready any more */
rdev->ring[ring].ready = false;
mutex_unlock(&rdev->ring_lock);
return -EDEADLK;
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
rdev->fence_drv.last_jiffies = GetTimerTicks();
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
}
mutex_unlock(&rdev->ring_lock);
}
}
return 0;
}
 
/**
* radeon_fence_wait_any - wait for a fence to signal on any ring
*
* @rdev: radeon device pointer
* @fences: radeon fence object(s)
* @intr: use interruptible sleep
*
* Wait for any requested fence to signal (all asics). Fence
* array is indexed by ring id. @intr selects whether to use
* interruptible (true) or non-interruptible (false) sleep when
* waiting for the fences. Used by the suballocator.
* Returns 0 if any fence has passed, error for all other cases.
*/
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr)
#if 0
int radeon_fence_wait_next(struct radeon_device *rdev)
{
uint64_t seq[RADEON_NUM_RINGS];
unsigned i;
unsigned long irq_flags;
struct radeon_fence *fence;
int r;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
seq[i] = 0;
 
if (!fences[i]) {
continue;
if (rdev->gpu_lockup) {
return 0;
}
 
if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
/* something was already signaled */
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (list_empty(&rdev->fence_drv.emited)) {
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
 
seq[i] = fences[i]->seq;
}
 
r = radeon_fence_wait_any_seq(rdev, seq, intr);
if (r) {
fence = list_entry(rdev->fence_drv.emited.next,
struct radeon_fence, list);
radeon_fence_ref(fence);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
}
return 0;
}
 
/**
* radeon_fence_wait_next_locked - wait for the next fence to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
*
* Wait for the next fence on the requested ring to signal (all asics).
* Returns 0 if the next fence has passed, error for all other cases.
* Caller must hold ring lock.
*/
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
int radeon_fence_wait_last(struct radeon_device *rdev)
{
uint64_t seq;
unsigned long irq_flags;
struct radeon_fence *fence;
int r;
 
seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is
already the last emitted fence */
return -ENOENT;
if (rdev->gpu_lockup) {
return 0;
}
return radeon_fence_wait_seq(rdev, seq, ring, false, false);
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (list_empty(&rdev->fence_drv.emited)) {
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
 
/**
* radeon_fence_wait_empty_locked - wait for all fences to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
*
* Wait for all fences on the requested ring to signal (all asics).
* Returns 0 if the fences have passed, error for all other cases.
* Caller must hold ring lock.
*/
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
 
while(1) {
int r;
r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
if (r == -EDEADLK) {
mutex_unlock(&rdev->ring_lock);
r = radeon_gpu_reset(rdev);
mutex_lock(&rdev->ring_lock);
if (!r)
continue;
fence = list_entry(rdev->fence_drv.emited.prev,
struct radeon_fence, list);
radeon_fence_ref(fence);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
}
if (r) {
dev_err(rdev->dev, "error waiting for ring to become"
" idle (%d)\n", r);
}
return;
}
}
 
/**
* radeon_fence_ref - take a ref on a fence
*
* @fence: radeon fence object
*
* Take a reference on a fence (all asics).
* Returns the fence.
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
kref_get(&fence->kref);
642,230 → 339,58
return fence;
}
 
/**
* radeon_fence_unref - remove a ref on a fence
*
* @fence: radeon fence object
*
* Remove a reference on a fence (all asics).
*/
#endif
 
void radeon_fence_unref(struct radeon_fence **fence)
{
unsigned long irq_flags;
struct radeon_fence *tmp = *fence;
 
*fence = NULL;
if (tmp) {
kref_put(&tmp->kref, radeon_fence_destroy);
}
}
 
/**
* radeon_fence_count_emitted - get the count of emitted fences
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
*
* Get the number of fences emitted on the requested ring (all asics).
* Returns the number of emitted fences on the ring. Used by the
* dynpm code to track ring activity.
*/
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
if (tmp)
{
uint64_t emitted;
 
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
radeon_fence_process(rdev, ring);
emitted = rdev->fence_drv[ring].sync_seq[ring]
- atomic64_read(&rdev->fence_drv[ring].last_seq);
/* to avoid 32-bit wrap-around */
if (emitted > 0x10000000) {
emitted = 0x10000000;
write_lock_irqsave(&tmp->rdev->fence_drv.lock, irq_flags);
list_del(&tmp->list);
tmp->emited = false;
write_unlock_irqrestore(&tmp->rdev->fence_drv.lock, irq_flags);
}
}
return (unsigned)emitted;
}
 
/**
* radeon_fence_need_sync - do we need a semaphore
*
* @fence: radeon fence object
* @dst_ring: which ring to check against
*
* Check if the fence needs to be synced against another ring
* (all asics). If so, we need to emit a semaphore.
* Returns true if we need to sync with another ring, false if
* not.
*/
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
void radeon_fence_process(struct radeon_device *rdev)
{
struct radeon_fence_driver *fdrv;
unsigned long irq_flags;
bool wake;
 
if (!fence) {
return false;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
wake = radeon_fence_poll_locked(rdev);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
}
 
if (fence->ring == dst_ring) {
return false;
}
 
/* we are protected by the ring mutex */
fdrv = &fence->rdev->fence_drv[dst_ring];
if (fence->seq <= fdrv->sync_seq[fence->ring]) {
return false;
}
 
return true;
}
 
/**
* radeon_fence_note_sync - record the sync point
*
* @fence: radeon fence object
* @dst_ring: which ring to check against
*
* Note the sequence number at which point the fence will
* be synced with the requested ring (all asics).
*/
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
int radeon_fence_driver_init(struct radeon_device *rdev)
{
struct radeon_fence_driver *dst, *src;
unsigned i;
 
if (!fence) {
return;
}
 
if (fence->ring == dst_ring) {
return;
}
 
/* we are protected by the ring mutex */
src = &fence->rdev->fence_drv[fence->ring];
dst = &fence->rdev->fence_drv[dst_ring];
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (i == dst_ring) {
continue;
}
dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
}
}
 
/**
* radeon_fence_driver_start_ring - make the fence driver
* ready for use on the requested ring.
*
* @rdev: radeon device pointer
* @ring: ring index to start the fence driver on
*
* Make the fence driver ready for processing (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has.
* Returns 0 for success, errors for failure.
*/
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
uint64_t index;
unsigned long irq_flags;
int r;
 
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
if (rdev->wb.use_event) {
rdev->fence_drv[ring].scratch_reg = 0;
index = R600_WB_EVENT_OFFSET + ring * 4;
} else {
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
if (r) {
dev_err(rdev->dev, "fence failed to get scratch register\n");
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return r;
}
index = RADEON_WB_SCRATCH_OFFSET +
rdev->fence_drv[ring].scratch_reg -
rdev->scratch.reg_base;
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
radeon_fence_write(rdev, 0);
atomic_set(&rdev->fence_drv.seq, 0);
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
// init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
 
/**
* radeon_fence_driver_init_ring - init the fence driver
* for the requested ring.
*
* @rdev: radeon device pointer
* @ring: ring index to start the fence driver on
*
* Init the fence driver for the requested ring (all asics).
* Helper function for radeon_fence_driver_init().
*/
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
int i;
 
rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
 
/**
* radeon_fence_driver_init - init the fence driver
* for all possible rings.
*
* @rdev: radeon device pointer
*
* Init the fence driver for all possible rings (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has using
* radeon_fence_driver_start_ring().
* Returns 0 for success.
*/
int radeon_fence_driver_init(struct radeon_device *rdev)
{
int ring;
 
init_waitqueue_head(&rdev->fence_queue);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
radeon_fence_driver_init_ring(rdev, ring);
}
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
return 0;
}
 
/**
* radeon_fence_driver_fini - tear down the fence driver
* for all possible rings.
*
* @rdev: radeon device pointer
*
* Tear down the fence driver for all possible rings (all asics).
*/
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
int ring;
 
mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
radeon_fence_wait_empty_locked(rdev, ring);
wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
}
mutex_unlock(&rdev->ring_lock);
}
 
 
/*
* Fence debugfs
*/
875,24 → 400,16
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
int i, j;
struct radeon_fence *fence;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!rdev->fence_drv[i].initialized)
continue;
 
seq_printf(m, "--- ring %d ---\n", i);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
seq_printf(m, "Last emitted 0x%016llx\n",
rdev->fence_drv[i].sync_seq[i]);
 
for (j = 0; j < RADEON_NUM_RINGS; ++j) {
if (i != j && rdev->fence_drv[j].initialized)
seq_printf(m, "Last sync to ring %d 0x%016llx\n",
j, rdev->fence_drv[i].sync_seq[j]);
seq_printf(m, "Last signaled fence 0x%08X\n",
radeon_fence_read(rdev));
if (!list_empty(&rdev->fence_drv.emited)) {
fence = list_entry(rdev->fence_drv.emited.prev,
struct radeon_fence, list);
seq_printf(m, "Last emitted fence %p with 0x%08X\n",
fence, fence->seq);
}
}
return 0;
}
 
/drivers/video/drm/radeon/radeon_gart.c
25,8 → 25,8
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_reg.h"
 
43,38 → 43,8
}
 
/*
* GART
* The GART (Graphics Aperture Remapping Table) is an aperture
* in the GPU's address space. System pages can be mapped into
* the aperture and look like contiguous pages from the GPU's
* perspective. A page table maps the pages in the aperture
* to the actual backing pages in system memory.
*
* Radeon GPUs support both an internal GART, as described above,
* and AGP. AGP works similarly, but the GART table is configured
* and maintained by the northbridge rather than the driver.
* Radeon hw has a separate AGP aperture that is programmed to
* point to the AGP aperture provided by the northbridge and the
* requests are passed through to the northbridge aperture.
* Both AGP and internal GART can be used at the same time, however
* that is not currently supported by the driver.
*
* This file handles the common internal GART management.
*/
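 
/* Standalone sketch (not driver code) of the index arithmetic used by
* radeon_gart_bind()/radeon_gart_unbind() below: an offset into the
* aperture selects a GPU page-table slot t, and t maps back to a CPU
* page index p. The 16 KiB CPU page is an assumed example value so
* that one CPU page spans several GPU pages. */
#include <stdio.h>

#define GPU_PAGE_SIZE 4096u
#define CPU_PAGE_SIZE 16384u /* assumed; PAGE_SIZE in the driver */

int main(void)
{
    unsigned offset = 5 * CPU_PAGE_SIZE; /* offset into the aperture */
    unsigned t = offset / GPU_PAGE_SIZE; /* first GPU page-table slot */
    unsigned p = t / (CPU_PAGE_SIZE / GPU_PAGE_SIZE); /* CPU page index */
    unsigned j;

    /* one CPU page covers several GPU pages; each needs its own entry */
    for (j = 0; j < CPU_PAGE_SIZE / GPU_PAGE_SIZE; j++)
        printf("cpu page %u -> gart slot %u\n", p, t + j);
    return 0;
}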
 
/*
* Common GART table functions.
*/
/**
* radeon_gart_table_ram_alloc - allocate system ram for gart page table
*
* @rdev: radeon_device pointer
*
* Allocate system memory for GART page table
* (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
* gart table to be in system memory.
* Returns 0 for success, -ENOMEM for failure.
*/
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
void *ptr;
91,54 → 61,38
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
rdev->gart.ptr = ptr;
memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
rdev->gart.table.ram.ptr = ptr;
memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
return 0;
}
 
/**
* radeon_gart_table_ram_free - free system ram for gart page table
*
* @rdev: radeon_device pointer
*
* Free system memory for GART page table
* (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
* gart table to be in system memory.
*/
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
if (rdev->gart.ptr == NULL) {
if (rdev->gart.table.ram.ptr == NULL) {
return;
}
#ifdef CONFIG_X86
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
set_memory_wb((unsigned long)rdev->gart.ptr,
set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
rdev->gart.ptr = NULL;
// pci_free_consistent(rdev->pdev, rdev->gart.table_size,
// (void *)rdev->gart.table.ram.ptr,
// rdev->gart.table_addr);
rdev->gart.table.ram.ptr = NULL;
rdev->gart.table_addr = 0;
}
 
/**
* radeon_gart_table_vram_alloc - allocate vram for gart page table
*
* @rdev: radeon_device pointer
*
* Allocate video memory for GART page table
* (pcie r4xx, r5xx+). These asics require the
* gart table to be in video memory.
* Returns 0 for success, error for failure.
*/
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
int r;
 
if (rdev->gart.robj == NULL) {
if (rdev->gart.table.vram.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->gart.robj);
&rdev->gart.table.vram.robj);
if (r) {
return r;
}
146,93 → 100,51
return 0;
}
 
/**
* radeon_gart_table_vram_pin - pin gart page table in vram
*
* @rdev: radeon_device pointer
*
* Pin the GART page table in vram so it will not be moved
* by the memory manager (pcie r4xx, r5xx+). These asics require the
* gart table to be in video memory.
* Returns 0 for success, error for failure.
*/
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
uint64_t gpu_addr;
int r;
 
r = radeon_bo_reserve(rdev->gart.robj, false);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->gart.robj,
r = radeon_bo_pin(rdev->gart.table.vram.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
return r;
}
r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
r = radeon_bo_kmap(rdev->gart.table.vram.robj,
(void **)&rdev->gart.table.vram.ptr);
if (r)
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
rdev->gart.table_addr = gpu_addr;
return r;
}
 
/**
* radeon_gart_table_vram_unpin - unpin gart page table in vram
*
* @rdev: radeon_device pointer
*
* Unpin the GART page table in vram (pcie r4xx, r5xx+).
* These asics require the gart table to be in video memory.
*/
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
int r;
 
if (rdev->gart.robj == NULL) {
if (rdev->gart.table.vram.robj == NULL) {
return;
}
r = radeon_bo_reserve(rdev->gart.robj, false);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.robj);
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.ptr = NULL;
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
radeon_bo_unref(&rdev->gart.table.vram.robj);
}
 
/**
* radeon_gart_table_vram_free - free gart page table vram
*
* @rdev: radeon_device pointer
*
* Free the video memory used for the GART page table
* (pcie r4xx, r5xx+). These asics require the gart table to
* be in video memory.
*/
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
if (rdev->gart.robj == NULL) {
return;
}
radeon_gart_table_vram_unpin(rdev);
radeon_bo_unref(&rdev->gart.robj);
}
 
 
 
/*
* Common gart functions.
*/
/**
* radeon_gart_unbind - unbind pages from the gart page table
*
* @rdev: radeon_device pointer
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to unbind
*
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages)
{
242,7 → 154,7
u64 page_base;
 
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
WARN(1, "trying to unbind memory to unitialized GART !\n");
return;
}
t = offset / RADEON_GPU_PAGE_SIZE;
255,9 → 167,7
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
if (rdev->gart.ptr) {
radeon_gart_set_page(rdev, t, page_base);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
}
266,21 → 176,8
radeon_gart_tlb_flush(rdev);
}
 
/**
* radeon_gart_bind - bind pages into the gart page table
*
* @rdev: radeon_device pointer
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
*
* Binds the requested pages to the gart page table
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, u32 *pagelist, dma_addr_t *dma_addr)
int pages, u32_t *pagelist)
{
unsigned t;
unsigned p;
290,7 → 187,7
// dbgprintf("offset %x pages %d list %x\n",
// offset, pages, pagelist);
if (!rdev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
WARN(1, "trying to bind memory to unitialized GART !\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
297,9 → 194,13
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
for (i = 0; i < pages; i++, p++) {
/* we need to support large memory configurations */
/* assume that unbind has already been called on the range */
 
rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
 
 
rdev->gart.pages[p] = pagelist[i];
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base);
306,28 → 207,16
page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
mb();
radeon_gart_tlb_flush(rdev);
return 0;
}
 
/**
* radeon_gart_restore - bind all pages in the gart page table
*
* @rdev: radeon_device pointer
*
* Binds all pages in the gart page table (all asics).
* Used to rebuild the gart table on device startup or resume.
*/
void radeon_gart_restore(struct radeon_device *rdev)
{
int i, j, t;
u64 page_base;
 
if (!rdev->gart.ptr) {
return;
}
for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
page_base = rdev->gart.pages_addr[i];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
339,14 → 228,6
radeon_gart_tlb_flush(rdev);
}
 
/**
* radeon_gart_init - init the driver info for managing the gart
*
* @rdev: radeon_device pointer
*
* Allocate the dummy page and init the gart driver info (all asics).
* Returns 0 for success, error for failure.
*/
int radeon_gart_init(struct radeon_device *rdev)
{
int r, i;
368,13 → 249,14
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
GFP_KERNEL);
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
rdev->gart.num_cpu_pages);
rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
rdev->gart.num_cpu_pages, GFP_KERNEL);
if (rdev->gart.pages_addr == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
386,13 → 268,6
return 0;
}
 
/**
* radeon_gart_fini - tear down the driver info for managing the gart
*
* @rdev: radeon_device pointer
*
* Tear down the gart driver info and free the dummy page (all asics).
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
400,909 → 275,8
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
vfree(rdev->gart.pages);
vfree(rdev->gart.pages_addr);
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
}
 
/*
* GPUVM
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
* for the entire GPU, there are multiple VM page tables active
* at any given time. The VM page tables can contain a mix of
* vram pages and system memory pages; system memory pages
* can be mapped as snooped (cached system pages) or unsnooped
* (uncached system pages).
* Each VM has an ID associated with it and there is a page table
* associated with each VMID. When executing a command buffer,
* the kernel tells the ring what VMID to use for that command
* buffer. VMIDs are allocated dynamically as commands are submitted.
* The userspace drivers maintain their own address space and the kernel
* sets up their page tables accordingly when they submit their
* command buffers and a VMID is assigned.
* Cayman/Trinity support up to 8 active VMs at any given time;
* SI supports 16.
*/
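 
/* Standalone sketch (not driver code) of the sizing math in
* radeon_vm_num_pdes()/radeon_vm_directory_size() below; max_pfn and
* the block size of 9 are assumed example values, and each PDE is
* 8 bytes as in the driver. */
#include <stdio.h>

#define VM_BLOCK_SIZE 9 /* 1 << 9 PTEs per page table, assumed */

int main(void)
{
    unsigned max_pfn = 1u << 20; /* 4 GiB of 4 KiB pages, assumed */
    unsigned num_pdes = max_pfn >> VM_BLOCK_SIZE;
    unsigned dir_bytes = num_pdes * 8; /* one 8-byte PDE per table */

    printf("%u PDEs, %u KiB page directory\n", num_pdes, dir_bytes >> 10);
    return 0;
}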
 
/*
* vm helpers
*
* TODO bind a default page at vm initialization for default address
*/
 
/**
* radeon_vm_num_pdes - return the number of page directory entries
*
* @rdev: radeon_device pointer
*
* Calculate the number of page directory entries (cayman+).
*/
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}
 
/**
* radeon_vm_directory_size - returns the size of the page directory in bytes
*
* @rdev: radeon_device pointer
*
* Calculate the size of the page directory in bytes (cayman+).
*/
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
 
/**
* radeon_vm_manager_init - init the vm manager
*
* @rdev: radeon_device pointer
*
* Init the vm manager (cayman+).
* Returns 0 for success, error for failure.
*/
int radeon_vm_manager_init(struct radeon_device *rdev)
{
struct radeon_vm *vm;
struct radeon_bo_va *bo_va;
int r;
unsigned size;
 
if (!rdev->vm_manager.enabled) {
/* allocate enough for 2 full VM pts */
size = radeon_vm_directory_size(rdev);
size += rdev->vm_manager.max_pfn * 8;
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
RADEON_GPU_PAGE_ALIGN(size),
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
(rdev->vm_manager.max_pfn * 8) >> 10);
return r;
}
 
r = radeon_asic_vm_init(rdev);
if (r)
return r;
 
rdev->vm_manager.enabled = true;
 
r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
if (r)
return r;
}
 
/* restore page table */
list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
if (vm->page_directory == NULL)
continue;
 
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
}
}
return 0;
}
 
/**
* radeon_vm_free_pt - free the page table for a specific vm
*
* @rdev: radeon_device pointer
* @vm: vm to unbind
*
* Free the page table of a specific vm (cayman+).
*
* Global and local mutex must be locked!
*/
static void radeon_vm_free_pt(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va;
int i;
 
if (!vm->page_directory)
return;
 
list_del_init(&vm->list);
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
 
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
}
 
if (vm->page_tables == NULL)
return;
 
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
 
kfree(vm->page_tables);
}
 
/**
* radeon_vm_manager_fini - tear down the vm manager
*
* @rdev: radeon_device pointer
*
* Tear down the VM manager (cayman+).
*/
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
struct radeon_vm *vm, *tmp;
int i;
 
if (!rdev->vm_manager.enabled)
return;
 
mutex_lock(&rdev->vm_manager.lock);
/* free all allocated page tables */
list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
mutex_lock(&vm->mutex);
radeon_vm_free_pt(rdev, vm);
mutex_unlock(&vm->mutex);
}
for (i = 0; i < RADEON_NUM_VM; ++i) {
radeon_fence_unref(&rdev->vm_manager.active[i]);
}
radeon_asic_vm_fini(rdev);
mutex_unlock(&rdev->vm_manager.lock);
 
radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
rdev->vm_manager.enabled = false;
}
 
/**
* radeon_vm_evict - evict page table to make room for new one
*
* @rdev: radeon_device pointer
* @vm: VM we want to allocate something for
*
* Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
* Returns 0 for success, -ENOMEM for failure.
*
* Global and local mutex must be locked!
*/
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_vm *vm_evict;
 
if (list_empty(&rdev->vm_manager.lru_vm))
return -ENOMEM;
 
vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
struct radeon_vm, list);
if (vm_evict == vm)
return -ENOMEM;
 
mutex_lock(&vm_evict->mutex);
radeon_vm_free_pt(rdev, vm_evict);
mutex_unlock(&vm_evict->mutex);
return 0;
}
 
/**
* radeon_vm_alloc_pt - allocates a page table for a VM
*
* @rdev: radeon_device pointer
* @vm: vm to bind
*
* Allocate a page table for the requested vm (cayman+).
* Returns 0 for success, error for failure.
*
* Global and local mutex must be locked!
*/
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
unsigned pd_size, pts_size;
u64 *pd_addr;
int r;
 
if (vm == NULL) {
return -EINVAL;
}
 
if (vm->page_directory != NULL) {
return 0;
}
 
retry:
pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_directory, pd_size,
RADEON_GPU_PAGE_SIZE, false);
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
if (r)
return r;
goto retry;
 
} else if (r) {
return r;
}
 
vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
 
/* Initially clear the page directory */
pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
memset(pd_addr, 0, pd_size);
 
pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
 
if (vm->page_tables == NULL) {
DRM_ERROR("Cannot allocate memory for page table array\n");
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
return -ENOMEM;
}
 
return 0;
}
 
/**
* radeon_vm_add_to_lru - add VM's page table to LRU list
*
* @rdev: radeon_device pointer
* @vm: vm to add to LRU
*
* Add the allocated page table to the LRU list (cayman+).
*
* Global mutex must be locked!
*/
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
list_del_init(&vm->list);
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
}
 
/**
* radeon_vm_grab_id - allocate the next free VMID
*
* @rdev: radeon_device pointer
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
*
* Allocate an id for the vm (cayman+).
* Returns the fence we need to sync to (if any).
*
* Global and local mutex must be locked!
*/
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring)
{
struct radeon_fence *best[RADEON_NUM_RINGS] = {};
unsigned choices[2] = {};
unsigned i;
 
/* check if the id is still valid */
if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
return NULL;
 
/* we definitely need to flush */
radeon_fence_unref(&vm->last_flush);
 
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < rdev->vm_manager.nvm; ++i) {
struct radeon_fence *fence = rdev->vm_manager.active[i];
 
if (fence == NULL) {
/* found a free one */
vm->id = i;
return NULL;
}
 
if (radeon_fence_is_earlier(fence, best[fence->ring])) {
best[fence->ring] = fence;
choices[fence->ring == ring ? 0 : 1] = i;
}
}
 
for (i = 0; i < 2; ++i) {
if (choices[i]) {
vm->id = choices[i];
return rdev->vm_manager.active[choices[i]];
}
}
 
/* should never happen */
BUG();
return NULL;
}
 
/**
* radeon_vm_fence - remember fence for vm
*
* @rdev: radeon_device pointer
* @vm: vm we want to fence
* @fence: fence to remember
*
* Fence the vm (cayman+).
* Set the fence used to protect page table and id.
*
* Global and local mutex must be locked!
*/
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence)
{
radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
 
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(fence);
}
 
/**
* radeon_vm_bo_find - find the bo_va for a specific vm & bo
*
* @vm: requested vm
* @bo: requested buffer object
*
* Find @bo inside the requested vm (cayman+).
* Search inside the @bo's vm list for the requested vm
* Returns the found bo_va or NULL if none is found
*
* Object has to be reserved!
*/
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
 
list_for_each_entry(bo_va, &bo->va, bo_list) {
if (bo_va->vm == vm) {
return bo_va;
}
}
return NULL;
}
 
/**
* radeon_vm_bo_add - add a bo to a specific vm
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @bo: radeon buffer object
*
* Add @bo into the requested vm (cayman+).
* Add @bo to the list of bos associated with the vm
* Returns newly added bo_va or NULL for failure
*
* Object has to be reserved!
*/
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
 
bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (bo_va == NULL) {
return NULL;
}
bo_va->vm = vm;
bo_va->bo = bo;
bo_va->soffset = 0;
bo_va->eoffset = 0;
bo_va->flags = 0;
bo_va->valid = false;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
 
mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
list_add_tail(&bo_va->bo_list, &bo->va);
mutex_unlock(&vm->mutex);
 
return bo_va;
}
 
/**
* radeon_vm_bo_set_addr - set bo's virtual address inside a vm
*
* @rdev: radeon_device pointer
* @bo_va: bo_va to store the address
* @soffset: requested offset of the buffer in the VM address space
* @flags: attributes of pages (read/write/valid/etc.)
*
* Set offset of @bo_va (cayman+).
* Validate and set the offset requested within the vm address space.
* Returns 0 for success, error for failure.
*
* Object has to be reserved!
*/
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t soffset,
uint32_t flags)
{
uint64_t size = radeon_bo_size(bo_va->bo);
uint64_t eoffset, last_offset = 0;
struct radeon_vm *vm = bo_va->vm;
struct radeon_bo_va *tmp;
struct list_head *head;
unsigned last_pfn;
 
if (soffset) {
/* make sure object fit at this offset */
eoffset = soffset + size;
if (soffset >= eoffset) {
return -EINVAL;
}
 
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
if (last_pfn > rdev->vm_manager.max_pfn) {
dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
return -EINVAL;
}
 
} else {
eoffset = last_pfn = 0;
}
 
mutex_lock(&vm->mutex);
head = &vm->va;
last_offset = 0;
list_for_each_entry(tmp, &vm->va, vm_list) {
if (bo_va == tmp) {
/* skip over currently modified bo */
continue;
}
 
if (soffset >= last_offset && eoffset <= tmp->soffset) {
/* bo can be added before this one */
break;
}
if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
/* bo and tmp overlap, invalid offset */
dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
mutex_unlock(&vm->mutex);
return -EINVAL;
}
last_offset = tmp->eoffset;
head = &tmp->vm_list;
}
 
bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
bo_va->valid = false;
list_move(&bo_va->vm_list, head);
 
mutex_unlock(&vm->mutex);
return 0;
}
 
/**
* radeon_vm_map_gart - get the physical address of a gart page
*
* @rdev: radeon_device pointer
* @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
* to (cayman+).
* Returns the physical address of the page.
*/
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
uint64_t result;
 
/* page table offset */
result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
 
/* in case cpu page size != gpu page size*/
result |= addr & (~PAGE_MASK);
 
return result;
}
 
/**
* radeon_vm_update_pdes - make sure that page directory is valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
*
* Allocates new page tables if necessary
* and updates the page directory (cayman+).
* Returns 0 for success, error for failure.
*
* Global and local mutex must be locked!
*/
static int radeon_vm_update_pdes(struct radeon_device *rdev,
struct radeon_vm *vm,
uint64_t start, uint64_t end)
{
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
 
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0;
uint64_t pt_idx;
int r;
 
start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
 
/* walk over the address space and update the page directory */
for (pt_idx = start; pt_idx <= end; ++pt_idx) {
uint64_t pde, pt;
 
if (vm->page_tables[pt_idx])
continue;
 
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_tables[pt_idx],
RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, false);
 
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
if (r)
return r;
goto retry;
} else if (r) {
return r;
}
 
pde = vm->pd_gpu_addr + pt_idx * 8;
 
pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
 
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt)) {
 
if (count) {
radeon_asic_vm_set_page(rdev, last_pde,
last_pt, count, incr,
RADEON_VM_PAGE_VALID);
}
 
count = 1;
last_pde = pde;
last_pt = pt;
} else {
++count;
}
}
 
if (count) {
radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
incr, RADEON_VM_PAGE_VALID);
 
}
 
return 0;
}
 
/**
* radeon_vm_update_ptes - make sure that page tables are valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to
* @flags: mapping flags
*
* Update the page tables in the range @start - @end (cayman+).
*
* Global and local mutex must be locked!
*/
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
 
uint64_t last_pte = ~0, last_dst = ~0;
unsigned count = 0;
uint64_t addr;
 
start = start / RADEON_GPU_PAGE_SIZE;
end = end / RADEON_GPU_PAGE_SIZE;
 
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
unsigned nptes;
uint64_t pte;
 
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = RADEON_VM_PTE_COUNT - (addr & mask);
 
pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
pte += (addr & mask) * 8;
 
if ((last_pte + 8 * count) != pte) {
 
if (count) {
radeon_asic_vm_set_page(rdev, last_pte,
last_dst, count,
RADEON_GPU_PAGE_SIZE,
flags);
}
 
count = nptes;
last_pte = pte;
last_dst = dst;
} else {
count += nptes;
}
 
addr += nptes;
dst += nptes * RADEON_GPU_PAGE_SIZE;
}
 
if (count) {
radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
RADEON_GPU_PAGE_SIZE, flags);
}
}
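 
/* Standalone sketch (not driver code) of the coalescing done by
* radeon_vm_update_pdes()/radeon_vm_update_ptes() above: runs of
* contiguous entries are flushed with one "set page" call of count
* entries instead of one call per entry. Addresses are assumed
* example values. */
#include <stdio.h>

static void set_pages(unsigned long long pe, unsigned count)
{
    printf("write %u entries starting at 0x%llx\n", count, pe);
}

int main(void)
{
    unsigned long long pte[] = { 0x1000, 0x1008, 0x1010, 0x2000 };
    unsigned long long last = ~0ULL;
    unsigned i, count = 0, n = sizeof(pte) / sizeof(pte[0]);

    for (i = 0; i < n; i++) {
        if (last + 8 * count != pte[i]) { /* run broken, flush it */
            if (count)
                set_pages(last, count);
            last = pte[i];
            count = 1;
        } else {
            count++; /* still contiguous, extend the run */
        }
    }
    if (count)
        set_pages(last, count); /* flush the final run */
    return 0;
}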
 
/**
* radeon_vm_bo_update_pte - map a bo into the vm page table
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @bo: radeon buffer object
* @mem: ttm mem
*
* Fill in the page table entries for @bo (cayman+).
* Returns 0 for success, -EINVAL for failure.
*
* Object has to be reserved and global and local mutex must be locked!
*/
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct ttm_mem_reg *mem)
{
unsigned ridx = rdev->asic->vm.pt_ring_index;
struct radeon_ring *ring = &rdev->ring[ridx];
struct radeon_semaphore *sem = NULL;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
uint64_t addr;
int r;
 
/* nothing to do if vm isn't bound */
if (vm->page_directory == NULL)
return 0;
 
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
 
if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
bo, vm);
return -EINVAL;
}
 
if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
return 0;
 
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
bo_va->valid = true;
}
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
} else {
addr += rdev->vm_manager.vram_base_offset;
}
} else {
addr = 0;
bo_va->valid = false;
}
 
if (vm->fence && radeon_fence_signaled(vm->fence)) {
radeon_fence_unref(&vm->fence);
}
 
if (vm->fence && vm->fence->ring != ridx) {
r = radeon_semaphore_create(rdev, &sem);
if (r) {
return r;
}
}
 
nptes = radeon_bo_ngpu_pages(bo);
 
/* assume two extra pdes in case the mapping overlaps the borders */
npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
 
/* estimate number of dw needed */
/* semaphore, fence and padding */
ndw = 32;
 
if (RADEON_VM_BLOCK_SIZE > 11)
/* reserve space for one header for every 2k dwords */
ndw += (nptes >> 11) * 4;
else
/* reserve space for one header for
every (1 << BLOCK_SIZE) entries */
ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
 
/* reserve space for pte addresses */
ndw += nptes * 2;
 
/* reserve space for one header for every 2k dwords */
ndw += (npdes >> 11) * 4;
 
/* reserve space for pde addresses */
ndw += npdes * 2;
 
r = radeon_ring_lock(rdev, ring, ndw);
if (r) {
return r;
}
 
if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
radeon_fence_note_sync(vm->fence, ridx);
}
 
r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return r;
}
 
radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
addr, bo_va->flags);
 
radeon_fence_unref(&vm->fence);
r = radeon_fence_emit(rdev, &vm->fence, ridx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return r;
}
radeon_ring_unlock_commit(rdev, ring);
radeon_semaphore_free(rdev, &sem, vm->fence);
radeon_fence_unref(&vm->last_flush);
 
return 0;
}
 
/**
* radeon_vm_bo_rmv - remove a bo from a specific vm
*
* @rdev: radeon_device pointer
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
* Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
* remove the ptes for @bo_va in the page table.
* Returns 0 for success.
*
* Object has to be reserved!
*/
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
int r;
 
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&bo_va->vm->mutex);
r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
 
kfree(bo_va);
return r;
}
 
/**
* radeon_vm_bo_invalidate - mark the bo as invalid
*
* @rdev: radeon_device pointer
* @bo: radeon buffer object
*
* Mark @bo as invalid (cayman+).
*/
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
 
BUG_ON(!atomic_read(&bo->tbo.reserved));
list_for_each_entry(bo_va, &bo->va, bo_list) {
bo_va->valid = false;
}
}
 
/**
* radeon_vm_init - initialize a vm instance
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Init @vm fields (cayman+).
*/
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
vm->id = 0;
vm->fence = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
}
 
/**
* radeon_vm_fini - tear down a vm instance
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Tear down @vm (cayman+).
* Unbind the VM and remove all bos from the vm bo list
*/
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va, *tmp;
int r;
 
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
radeon_vm_free_pt(rdev, vm);
mutex_unlock(&rdev->vm_manager.lock);
 
if (!list_empty(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
list_del_init(&bo_va->vm_list);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
radeon_bo_unreserve(bo_va->bo);
kfree(bo_va);
}
}
radeon_fence_unref(&vm->fence);
radeon_fence_unref(&vm->last_flush);
mutex_unlock(&vm->mutex);
}
/drivers/video/drm/radeon/radeon_ring.c
24,365 → 24,226
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
* Christian Konig
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
 
int radeon_debugfs_ib_init(struct radeon_device *rdev);
 
/*
* IB
* IBs (Indirect Buffers) are areas of GPU accessible memory where
* commands are stored. You can put a pointer to the IB in the
* command ring and the hw will fetch the commands from the IB
* and execute them. Generally userspace acceleration drivers
* produce command buffers which are sent to the kernel and
* put in IBs for execution by the requested ring.
* IB.
*/
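 
/* Minimal usage sketch of the IB lifecycle this file implements;
* signatures follow the radeon_ib_get()/radeon_ib_schedule()/
* radeon_ib_free() definitions below. Error handling is trimmed and
* the filler NOP packet (2 << 30) is an assumption for illustration. */
static int example_submit_nop_ib(struct radeon_device *rdev)
{
    struct radeon_ib ib;
    int r;

    /* request a small IB on the GFX ring, no VM */
    r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 64);
    if (r)
        return r;
    ib.ptr[ib.length_dw++] = 2 << 30; /* filler NOP packet, assumed */
    r = radeon_ib_schedule(rdev, &ib, NULL); /* no const IB */
    radeon_ib_free(rdev, &ib);
    return r;
}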
static int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
/**
* radeon_ib_get - request an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ring: ring index the IB is associated with
* @ib: IB object returned
* @size: requested IB size
*
* Request an IB (all asics). IBs are allocated using the
* suballocator.
* Returns 0 on success, error on failure.
*/
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
int i, r;
struct radeon_fence *fence;
struct radeon_ib *nib;
int r = 0, i, c;
 
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
*ib = NULL;
r = radeon_fence_create(rdev, &fence);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
 
r = radeon_semaphore_create(rdev, &ib->semaphore);
mutex_lock(&rdev->ib_pool.mutex);
for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
i &= (RADEON_IB_POOL_SIZE - 1);
if (rdev->ib_pool.ibs[i].free) {
nib = &rdev->ib_pool.ibs[i];
break;
}
}
if (nib == NULL) {
/* This should never happen; it means we allocated all
* IBs and haven't scheduled one yet. Return EBUSY to
* userspace, hoping that on a later ioctl call we get
* better luck
*/
dev_err(rdev->dev, "no free indirect buffer !\n");
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
return -EBUSY;
}
rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
nib->free = false;
if (nib->fence) {
mutex_unlock(&rdev->ib_pool.mutex);
r = radeon_fence_wait(nib->fence, false);
if (r) {
dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
mutex_lock(&rdev->ib_pool.mutex);
nib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
return r;
}
 
ib->ring = ring;
ib->fence = NULL;
ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
ib->vm = vm;
if (vm) {
/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
* space and soffset is the offset inside the pool bo
*/
ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
} else {
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
mutex_lock(&rdev->ib_pool.mutex);
}
ib->is_const_ib = false;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
ib->sync_to[i] = NULL;
 
radeon_fence_unref(&nib->fence);
nib->fence = fence;
nib->length_dw = 0;
mutex_unlock(&rdev->ib_pool.mutex);
*ib = nib;
return 0;
}
 
/**
* radeon_ib_free - free an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ib: IB object to free
*
* Free an IB (all asics).
*/
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
struct radeon_ib *tmp = *ib;
 
*ib = NULL;
if (tmp == NULL) {
return;
}
if (!tmp->fence->emited)
radeon_fence_unref(&tmp->fence);
mutex_lock(&rdev->ib_pool.mutex);
tmp->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
}
 
/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
* @ib: IB object to schedule
* @const_ib: Const IB to schedule (SI only)
*
* Schedule an IB on the associated ring (all asics).
* Returns 0 on success, error on failure.
*
* On SI, there are two parallel engines fed from the primary ring,
* the CE (Constant Engine) and the DE (Drawing Engine). Since
* resource descriptors have moved to memory, the CE allows you to
* prime the caches while the DE is updating register state so that
* the resource descriptors will already be in cache when the draw is
* processed. To accomplish this, the userspace driver submits two
* IBs, one for the CE and one for the DE. If there is a CE IB (called
* a CONST_IB), it will be put on the ring prior to the DE IB. Prior
* to SI there was just a DE IB.
*/
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
bool need_sync = false;
int i, r = 0;
int r = 0;
 
if (!ib->length_dw || !ring->ready) {
if (!ib->length_dw || !rdev->cp.ready) {
/* TODO: Nothing in the IB that we should report. */
dev_err(rdev->dev, "couldn't schedule ib\n");
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
 
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
r = radeon_ring_lock(rdev, 64);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_fence *fence = ib->sync_to[i];
if (radeon_fence_need_sync(fence, ib->ring)) {
need_sync = true;
radeon_semaphore_sync_rings(rdev, ib->semaphore,
fence->ring, ib->ring);
radeon_fence_note_sync(fence, ib->ring);
}
}
/* immediately free semaphore when we don't need to sync */
if (!need_sync) {
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
}
/* if we can't remember our last VM flush then flush now! */
if (ib->vm && !ib->vm->last_flush) {
radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
}
if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
}
radeon_ring_ib_execute(rdev, ib->ring, ib);
r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
return r;
}
if (const_ib) {
const_ib->fence = radeon_fence_ref(ib->fence);
}
/* we just flushed the VM, remember that */
if (ib->vm && !ib->vm->last_flush) {
ib->vm->last_flush = radeon_fence_ref(ib->fence);
}
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
/* once scheduled IB is considered free and protected by the fence */
ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev);
return 0;
}
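 
/* Hedged sketch of the SI two-IB submission described above: the CE
* (const) IB is passed alongside the DE IB in a single schedule call
* and is placed on the ring first. The wrapper name is an assumption. */
static int example_submit_with_const_ib(struct radeon_device *rdev,
                                        struct radeon_ib *de_ib,
                                        struct radeon_ib *ce_ib)
{
    /* radeon_ib_schedule() puts ce_ib on the ring before de_ib */
    return radeon_ib_schedule(rdev, de_ib, ce_ib);
}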
 
/**
* radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Initialize the suballocator to manage a pool of memory
* for use as IBs (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ib_pool_init(struct radeon_device *rdev)
{
int r;
void *ptr;
uint64_t gpu_addr;
int i;
int r = 0;
 
if (rdev->ib_pool_ready) {
if (rdev->ib_pool.robj)
return 0;
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
return r;
}
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GEM_DOMAIN_GTT);
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->ib_pool.robj);
DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
return r;
}
 
r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
radeon_bo_unreserve(rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
return r;
}
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
unsigned offset;
 
rdev->ib_pool_ready = true;
if (radeon_debugfs_sa_init(rdev)) {
dev_err(rdev->dev, "failed to register debugfs file for SA\n");
offset = i * 64 * 1024;
rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
rdev->ib_pool.ibs[i].ptr = ptr + offset;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
rdev->ib_pool.ibs[i].free = true;
}
return 0;
rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
if (radeon_debugfs_ib_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for IB !\n");
}
return r;
}
 
/**
* radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Tear down the suballocator managing the pool of memory
* for use as IBs (all asics).
*/
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
if (rdev->ib_pool_ready) {
radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
rdev->ib_pool_ready = false;
}
}
 
/**
* radeon_ib_ring_tests - test IBs on the rings
*
* @rdev: radeon_device pointer
*
* Test an IB (Indirect Buffer) on each ring.
* If the test fails, disable the ring.
* Returns 0 on success, error if the primary GFX ring
* IB test fails.
*/
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
unsigned i;
int r;
struct radeon_bo *robj;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_ring *ring = &rdev->ring[i];
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
// radeon_ib_bogus_cleanup(rdev);
robj = rdev->ib_pool.robj;
rdev->ib_pool.robj = NULL;
mutex_unlock(&rdev->ib_pool.mutex);
 
if (!ring->ready)
continue;
 
r = radeon_ib_test(rdev, i, ring);
if (r) {
ring->ready = false;
 
if (i == RADEON_RING_TYPE_GFX_INDEX) {
/* oh, oh, that's really bad */
DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
rdev->accel_working = false;
return r;
 
} else {
/* still not good, but we can live with it */
DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
if (robj) {
r = radeon_bo_reserve(robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(robj);
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
radeon_bo_unref(&robj);
}
}
return 0;
}
 
 
/*
* Rings
* Most engines on the GPU are fed via ring buffers. Ring
* buffers are areas of GPU accessible memory that the host
* writes commands into and the GPU reads commands out of.
* There is a rptr (read pointer) that determines where the
* GPU is currently reading, and a wptr (write pointer)
* which determines where the host has written. When the
* pointers are equal, the ring is idle. When the host
* writes commands to the ring buffer, it increments the
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
* Ring.
*/
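 
/* Worked sketch (not driver code) of the rptr/wptr arithmetic above:
* because the ring size is a power of two, the free space can be
* computed with a mask instead of a modulo, exactly as
* radeon_ring_free_size() does. Sizes and pointers are assumed values. */
#include <stdio.h>

int main(void)
{
    unsigned ring_dw = 1024; /* ring size in dwords, assumed */
    unsigned ptr_mask = ring_dw - 1;
    unsigned rptr = 100, wptr = 900; /* example pointer positions */
    unsigned free_dw = (rptr + ring_dw - wptr) & ptr_mask;

    if (!free_dw) /* rptr == wptr means the ring is idle/empty */
        free_dw = ring_dw;
    printf("%u dwords free\n", free_dw); /* prints 224 */
    return 0;
}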
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
 
/**
* radeon_ring_write - write a value to the ring
*
* @ring: radeon_ring structure holding ring information
* @v: dword (dw) value to write
*
* Write a value to the requested ring buffer (all asics).
*/
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
void radeon_ring_free_size(struct radeon_device *rdev)
{
#if DRM_DEBUG_CODE
if (ring->count_dw <= 0) {
DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
}
#endif
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}
 
/**
* radeon_ring_supports_scratch_reg - check if the ring supports
* writing to scratch registers
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Check if a specific ring supports writing to scratch registers (all asics).
* Returns true if the ring supports writing to scratch regs, false if not.
*/
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
struct radeon_ring *ring)
{
switch (ring->idx) {
case RADEON_RING_TYPE_GFX_INDEX:
case CAYMAN_RING_TYPE_CP1_INDEX:
case CAYMAN_RING_TYPE_CP2_INDEX:
return true;
default:
return false;
}
}
 
/**
* radeon_ring_free_size - update the free size
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Update the free dw slots in the ring buffer (all asics).
*/
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rptr;
 
if (rdev->wb.enabled)
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
else {
if (rdev->family >= CHIP_R600)
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
else
rptr = RREG32(ring->rptr_reg);
ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
}
/* This works because ring_size is a power of 2 */
ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
ring->ring_free_dw -= ring->wptr;
ring->ring_free_dw &= ring->ptr_mask;
if (!ring->ring_free_dw) {
ring->ring_free_dw = ring->ring_size / 4;
rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
rdev->cp.ring_free_dw -= rdev->cp.wptr;
rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
if (!rdev->cp.ring_free_dw) {
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
}
}
 
/**
* radeon_ring_alloc - allocate space on the ring buffer
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @ndw: number of dwords to allocate in the ring buffer
*
* Allocate @ndw dwords in the ring buffer (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
int r;
 
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
while (ndw > (ring->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, ring);
if (ndw < ring->ring_free_dw) {
ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
while (ndw > (rdev->cp.ring_free_dw - 1)) {
radeon_ring_free_size(rdev);
if (ndw < rdev->cp.ring_free_dw) {
break;
}
// r = radeon_fence_wait_next(rdev);
391,362 → 252,98
// return r;
// }
}
ring->count_dw = ndw;
ring->wptr_old = ring->wptr;
rdev->cp.count_dw = ndw;
rdev->cp.wptr_old = rdev->cp.wptr;
return 0;
}
 
/**
* radeon_ring_lock - lock the ring and allocate space on it
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @ndw: number of dwords to allocate in the ring buffer
*
* Lock the ring and allocate @ndw dwords in the ring buffer
* (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
int r;
 
mutex_lock(&rdev->ring_lock);
r = radeon_ring_alloc(rdev, ring, ndw);
mutex_lock(&rdev->cp.mutex);
r = radeon_ring_alloc(rdev, ndw);
if (r) {
mutex_unlock(&rdev->ring_lock);
mutex_unlock(&rdev->cp.mutex);
return r;
}
return 0;
}
 
/**
* radeon_ring_commit - tell the GPU to execute the new
* commands on the ring buffer
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Update the wptr (write pointer) to tell the GPU to
* execute new commands on the ring buffer (all asics).
*/
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
void radeon_ring_commit(struct radeon_device *rdev)
{
unsigned count_dw_pad;
unsigned i;
 
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
count_dw_pad = (rdev->cp.align_mask + 1) -
(rdev->cp.wptr & rdev->cp.align_mask);
for (i = 0; i < count_dw_pad; i++) {
radeon_ring_write(rdev, 2 << 30);
}
DRM_MEMORYBARRIER();
WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
(void)RREG32(ring->wptr_reg);
radeon_cp_commit(rdev);
}
 
/**
* radeon_ring_unlock_commit - tell the GPU to execute the new
* commands on the ring buffer and unlock it
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Call radeon_ring_commit() then unlock the ring (all asics).
*/
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
radeon_ring_commit(rdev, ring);
mutex_unlock(&rdev->ring_lock);
radeon_ring_commit(rdev);
mutex_unlock(&rdev->cp.mutex);
}
 
/**
* radeon_ring_undo - reset the wptr
*
* @ring: radeon_ring structure holding ring information
*
* Reset the driver's copy of the wptr (all asics).
*/
void radeon_ring_undo(struct radeon_ring *ring)
void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
ring->wptr = ring->wptr_old;
rdev->cp.wptr = rdev->cp.wptr_old;
mutex_unlock(&rdev->cp.mutex);
}
 
/**
* radeon_ring_unlock_undo - reset the wptr and unlock the ring
*
* @ring: radeon_ring structure holding ring information
*
* Call radeon_ring_undo() then unlock the ring (all asics).
*/
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
radeon_ring_undo(ring);
mutex_unlock(&rdev->ring_lock);
}
 
/**
* radeon_ring_force_activity - add some nop packets to the ring
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Add some nop packets to the ring to force activity (all asics).
* Used for lockup detection to see if the rptr is advancing.
*/
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
 
radeon_ring_free_size(rdev, ring);
if (ring->rptr == ring->wptr) {
r = radeon_ring_alloc(rdev, ring, 1);
if (!r) {
radeon_ring_write(ring, ring->nop);
radeon_ring_commit(rdev, ring);
}
}
}
 
/**
* radeon_ring_lockup_update - update lockup variables
*
* @ring: radeon_ring structure holding ring information
*
* Update the last rptr value and timestamp (all asics).
*/
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
ring->last_rptr = ring->rptr;
ring->last_activity = GetTimerTicks();
}
 
/**
* radeon_ring_test_lockup() - check if ring is locked up by recording information
* @rdev: radeon device structure
* @ring: radeon_ring structure holding ring information
*
* We don't need to initialize the lockup tracking information, as we will either
* see the CP rptr move to a different value or a jiffies wrap around, either of
* which will force initialization of the lockup tracking information.
*
* A possible false positive is if we get called after a while and last_cp_rptr ==
* the current CP rptr, even if it's unlikely it might happen. To avoid this,
* if the elapsed time since the last call is bigger than 2 seconds we return
* false and update the tracking information. Because of this, the caller must call
* radeon_ring_test_lockup several times in less than 2 seconds for a lockup to be
* reported; the fencing code should be cautious about that.
*
* Caller should write to the ring to force the CP to do something, so we don't get
* a false positive when the CP has simply been given nothing to do.
*
**/
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned long cjiffies, elapsed;
uint32_t rptr;
 
cjiffies = GetTimerTicks();
if (!time_after(cjiffies, ring->last_activity)) {
/* likely a wrap around */
radeon_ring_lockup_update(ring);
return false;
}
rptr = RREG32(ring->rptr_reg);
ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
if (ring->rptr != ring->last_rptr) {
/* CP is still working no lockup */
radeon_ring_lockup_update(ring);
return false;
}
elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
/* give a chance to the GPU ... */
return false;
}
 
/**
* radeon_ring_backup - Back up the content of a ring
*
* @rdev: radeon_device pointer
* @ring: the ring we want to back up
*
* Saves all unprocessed commits from a ring, returns the number of dwords saved.
*/
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data)
{
unsigned size, ptr, i;
 
/* just in case lock the ring */
mutex_lock(&rdev->ring_lock);
*data = NULL;
 
if (ring->ring_obj == NULL) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
 
/* it doesn't make sense to save anything if all fences are signaled */
if (!radeon_fence_count_emitted(rdev, ring->idx)) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
 
/* calculate the number of dw on the ring */
if (ring->rptr_save_reg)
ptr = RREG32(ring->rptr_save_reg);
else if (rdev->wb.enabled)
ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
else {
/* no way to read back the next rptr */
mutex_unlock(&rdev->ring_lock);
return 0;
}
 
size = ring->wptr + (ring->ring_size / 4);
size -= ptr;
size &= ring->ptr_mask;
if (size == 0) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
 
/* and then save the content of the ring */
*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (!*data) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
for (i = 0; i < size; ++i) {
(*data)[i] = ring->ring[ptr++];
ptr &= ring->ptr_mask;
}
 
mutex_unlock(&rdev->ring_lock);
return size;
}
 
/**
* radeon_ring_restore - append saved commands to the ring again
*
* @rdev: radeon_device pointer
* @ring: ring to append commands to
* @size: number of dwords we want to write
* @data: saved commands
*
* Allocates space on the ring and restore the previously saved commands.
*/
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data)
{
int i, r;
 
if (!size || !data)
return 0;
 
/* restore the saved ring content */
r = radeon_ring_lock(rdev, ring, size);
if (r)
return r;
 
for (i = 0; i < size; ++i) {
radeon_ring_write(ring, data[i]);
}
 
radeon_ring_unlock_commit(rdev, ring);
kfree(data);
return 0;
}
 
/**
* radeon_ring_init - init driver ring struct.
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @ring_size: size of the ring
* @rptr_offs: offset of the rptr writeback location in the WB buffer
* @rptr_reg: MMIO offset of the rptr register
* @wptr_reg: MMIO offset of the wptr register
* @ptr_reg_shift: bit offset of the rptr/wptr values
* @ptr_reg_mask: bit mask of the rptr/wptr values
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
int r;
 
ring->ring_size = ring_size;
ring->rptr_offs = rptr_offs;
ring->rptr_reg = rptr_reg;
ring->wptr_reg = wptr_reg;
ring->ptr_reg_shift = ptr_reg_shift;
ring->ptr_reg_mask = ptr_reg_mask;
ring->nop = nop;
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
if (rdev->cp.ring_obj == NULL) {
r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
NULL, &ring->ring_obj);
&rdev->cp.ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
r = radeon_bo_reserve(ring->ring_obj, false);
r = radeon_bo_reserve(rdev->cp.ring_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
&ring->gpu_addr);
r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->cp.gpu_addr);
if (r) {
radeon_bo_unreserve(ring->ring_obj);
radeon_bo_unreserve(rdev->cp.ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
r = radeon_bo_kmap(ring->ring_obj,
(void **)&ring->ring);
radeon_bo_unreserve(ring->ring_obj);
r = radeon_bo_kmap(rdev->cp.ring_obj,
(void **)&rdev->cp.ring);
radeon_bo_unreserve(rdev->cp.ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
if (rdev->wb.enabled) {
u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
}
if (radeon_debugfs_ring_init(rdev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
radeon_ring_lockup_update(ring);
rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
return 0;
}
 
/**
* radeon_ring_fini - tear down the driver ring struct.
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Tear down the driver information for the selected ring (all asics).
*/
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
void radeon_ring_fini(struct radeon_device *rdev)
{
int r;
struct radeon_bo *ring_obj;
 
mutex_lock(&rdev->ring_lock);
ring_obj = ring->ring_obj;
ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;
mutex_unlock(&rdev->ring_lock);
mutex_lock(&rdev->cp.mutex);
ring_obj = rdev->cp.ring_obj;
rdev->cp.ring = NULL;
rdev->cp.ring_obj = NULL;
mutex_unlock(&rdev->cp.mutex);
 
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
759,92 → 356,81
}
}
 
 
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
 
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
unsigned count, i, j;
struct radeon_ib *ib = node->info_ent->data;
unsigned i;
 
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
if (ring->rptr_save_reg) {
seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
RREG32(ring->rptr_save_reg));
if (ib == NULL) {
return 0;
}
seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
i = ring->rptr;
for (j = 0; j <= count; j++) {
seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
i = (i + 1) & ring->ptr_mask;
seq_printf(m, "IB %04u\n", ib->idx);
seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
}
return 0;
}
 
static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
 
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};
 
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_device *rdev = node->info_ent->data;
struct radeon_ib *ib;
unsigned i;
 
radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
 
mutex_lock(&rdev->ib_pool.mutex);
if (list_empty(&rdev->ib_pool.bogus_ib)) {
mutex_unlock(&rdev->ib_pool.mutex);
seq_printf(m, "no bogus IB recorded\n");
return 0;
 
}
ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
list_del_init(&ib->list);
mutex_unlock(&rdev->ib_pool.mutex);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
}
vfree(ib->ptr);
kfree(ib);
return 0;
}
 
static struct drm_info_list radeon_debugfs_sa_list[] = {
{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
 
static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
 
#endif
 
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
unsigned r;
int r;
 
if (&rdev->ring[ridx] != ring)
continue;
 
r = radeon_debugfs_add_files(rdev, info, 1);
radeon_debugfs_ib_bogus_info_list[0].data = rdev;
r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
if (r)
return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
radeon_debugfs_ib_list[i].driver_features = 0;
radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
}
#endif
return 0;
}
 
static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
RADEON_IB_POOL_SIZE);
#else
return 0;
#endif
/drivers/video/drm/radeon/rs400.c
77,7 → 77,7
{
int r;
 
if (rdev->gart.ptr) {
if (rdev->gart.table.ram.ptr) {
WARN(1, "RS400 GART already initialized\n");
return 0;
}
182,9 → 182,6
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
rs400_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
212,7 → 209,6
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
222,7 → 218,7
((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
gtt[i] = entry;
rdev->gart.table.ram.ptr[i] = entry;
return 0;
}
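/* Hedged worked example of the PTE packing above, for a hypothetical
 * 40-bit bus address 0x0123456000:
 * entry = 0x23456000              lower 32 bits, page aligned
 *       | (0x01 << 4)             address bits 39:32 into bits 11:4
 *       | RS400_PTE_WRITEABLE | RS400_PTE_READABLE; */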
 
242,7 → 238,7
return -1;
}
 
static void rs400_gpu_init(struct radeon_device *rdev)
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
252,7 → 248,7
}
}
 
static void rs400_mc_init(struct radeon_device *rdev)
void rs400_mc_init(struct radeon_device *rdev)
{
u64 base;
 
370,7 → 366,7
#endif
}
 
static void rs400_mc_program(struct radeon_device *rdev)
void rs400_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
 
419,13 → 415,11
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
 
r = radeon_ib_pool_init(rdev);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
 
return 0;
}
 
488,7 → 482,6
if (r)
return r;
r300_set_reg_safe(rdev);
 
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
/drivers/video/drm/radeon/rs690.c
25,13 → 25,13
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs690d.h"
 
int rs690_mc_wait_for_idle(struct radeon_device *rdev)
static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
145,7 → 145,7
rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
 
static void rs690_mc_init(struct radeon_device *rdev)
void rs690_mc_init(struct radeon_device *rdev)
{
u64 base;
 
224,7 → 224,7
fixed20_12 sclk;
};
 
static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
struct radeon_crtc *crtc,
struct rs690_watermark *wm)
{
581,7 → 581,7
WREG32(R_000078_MC_INDEX, 0x7F);
}
 
static void rs690_mc_program(struct radeon_device *rdev)
void rs690_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
 
630,14 → 630,11
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
 
r = radeon_ib_pool_init(rdev);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
 
 
return 0;
}
 
701,7 → 698,6
if (r)
return r;
rs600_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
/drivers/video/drm/radeon/rv515.c
27,7 → 27,7
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "drmP.h"
#include "rv515d.h"
#include "radeon.h"
#include "radeon_asic.h"
35,9 → 35,9
#include "rv515_reg_safe.h"
 
/* This files gather functions specifics to: rv515 */
static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
void rv515_debugfs(struct radeon_device *rdev)
53,46 → 53,46
}
}
 
void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
void rv515_ring_start(struct radeon_device *rdev)
{
int r;
 
r = radeon_ring_lock(rdev, ring, 64);
r = radeon_ring_lock(rdev, 64);
if (r) {
return;
}
radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
radeon_ring_write(rdev,
ISYNC_ANY2D_IDLE3D |
ISYNC_ANY3D_IDLE2D |
ISYNC_WAIT_IDLEGUI |
ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
radeon_ring_write(rdev,
((6 << MS_X0_SHIFT) |
(6 << MS_Y0_SHIFT) |
(6 << MS_X1_SHIFT) |
101,8 → 101,8
(6 << MS_Y2_SHIFT) |
(6 << MSBD0_Y_SHIFT) |
(6 << MSBD0_X_SHIFT)));
radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
radeon_ring_write(ring,
radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
radeon_ring_write(rdev,
((6 << MS_X3_SHIFT) |
(6 << MS_Y3_SHIFT) |
(6 << MS_X4_SHIFT) |
110,15 → 110,15
(6 << MS_X5_SHIFT) |
(6 << MS_Y5_SHIFT) |
(6 << MSBD1_SHIFT)));
radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(ring, PACKET0(0x20C8, 0));
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(rdev, PACKET0(0x20C8, 0));
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
}
 
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
143,13 → 143,13
RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}
 
static void rv515_gpu_init(struct radeon_device *rdev)
void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
 
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"resetting GPU. Bad things might happen.\n");
"reseting GPU. Bad things might happen.\n");
}
rv515_vga_render_disable(rdev);
r420_pipes_init(rdev);
161,7 → 161,7
WREG32_PLL(0x000D, tmp);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"resetting GPU. Bad things might happen.\n");
"reseting GPU. Bad things might happen.\n");
}
if (rv515_mc_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait MC idle while "
189,7 → 189,7
}
}
 
static void rv515_mc_init(struct radeon_device *rdev)
void rv515_mc_init(struct radeon_device *rdev)
{
 
rv515_vram_get_type(rdev);
261,7 → 261,7
};
#endif
 
static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
270,7 → 270,7
#endif
}
 
static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
281,8 → 281,12
 
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
 
/* Stop all video */
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
307,10 → 311,19
/* Unlock host access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
/* Restore video state */
WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}
 
static void rv515_mc_program(struct radeon_device *rdev)
void rv515_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
 
388,13 → 401,11
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
 
r = radeon_ib_pool_init(rdev);
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
 
return 0;
}
 
466,7 → 477,6
if (r)
return r;
rv515_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
725,7 → 735,7
fixed20_12 sclk;
};
 
static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
struct radeon_crtc *crtc,
struct rv515_watermark *wm)
{
/drivers/video/drm/radeon/cayman_blit_shaders.c
24,7 → 24,6
* Alex Deucher <alexander.deucher@amd.com>
*/
 
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/kernel.h>
 
/drivers/video/drm/radeon/fwblob.asm
119,7 → 119,7
dd (SUMO2ME_END - SUMO2ME_START)
 
 
macro NI_code [arg]
macro ni_code [arg]
{
dd FIRMWARE_#arg#_ME
dd arg#ME_START
135,7 → 135,7
 
}
 
NI_code BARTS, TURKS, CAICOS, CAYMAN
ni_code BARTS, TURKS, CAICOS, CAYMAN
 
dd FIRMWARE_RV610_PFP
dd RV610PFP_START
235,32 → 235,7
dd SUMORLC_START
dd (SUMORLC_END - SUMORLC_START)
 
macro SI_code [arg]
{
dd FIRMWARE_#arg#_PFP
dd arg#_PFP_START
dd (arg#_PFP_END - arg#_PFP_START)
 
dd FIRMWARE_#arg#_ME
dd arg#_ME_START
dd (arg#_ME_END - arg#_ME_START)
 
dd FIRMWARE_#arg#_CE
dd arg#_CE_START
dd (arg#_CE_END - arg#_CE_START)
 
dd FIRMWARE_#arg#_MC
dd arg#_MC_START
dd (arg#_MC_END - arg#_MC_START)
 
dd FIRMWARE_#arg#_RLC
dd arg#_RLC_START
dd (arg#_RLC_END - arg#_RLC_START)
 
}
 
SI_code TAHITI, PITCAIRN, VERDE
 
___end_builtin_fw:
 
 
340,49 → 315,8
FIRMWARE_CAICOS_MC db 'radeon/CAICOS_mc.bin',0
FIRMWARE_CAYMAN_MC db 'radeon/CAYMAN_mc.bin',0
 
macro SI_firmware [arg]
{
 
forward
 
FIRMWARE_#arg#_PFP db 'radeon/',`arg,'_pfp.bin',0
FIRMWARE_#arg#_ME db 'radeon/',`arg,'_me.bin',0
FIRMWARE_#arg#_CE db 'radeon/',`arg,'_ce.bin',0
FIRMWARE_#arg#_MC db 'radeon/',`arg,'_mc.bin',0
FIRMWARE_#arg#_RLC db 'radeon/',`arg,'_rlc.bin',0
 
forward
 
align 16
arg#_PFP_START:
file "firmware/"#`arg#"_pfp.bin"
arg#_PFP_END:
 
align 16
arg#_ME_START:
file "firmware/"#`arg#"_me.bin"
arg#_ME_END:
 
align 16
arg#_CE_START:
file "firmware/"#`arg#"_ce.bin"
arg#_CE_END:
 
align 16
arg#_MC_START:
file "firmware/"#`arg#"_mc.bin"
arg#_MC_END:
 
align 16
arg#_RLC_START:
file "firmware/"#`arg#"_rlc.bin"
arg#_RLC_END:
 
}
 
SI_firmware TAHITI,PITCAIRN,VERDE
 
align 16
R100CP_START:
file 'firmware/R100_cp.bin'
R100CP_END:
693,5 → 627,3
CAYMANMC_START:
file 'firmware/CAYMAN_mc.bin'
CAYMANMC_END:
 
 
/drivers/video/drm/radeon/nid.h
41,13 → 41,7
#define CAYMAN_MAX_TCC 16
#define CAYMAN_MAX_TCC_MASK 0xFF
 
#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
 
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
 
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
109,7 → 103,6
#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
#define FUS_MC_VM_FB_OFFSET 0x2068
 
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
#define MC_ARB_RAMCFG 0x2760
151,8 → 144,6
#define CGTS_SYS_TCC_DISABLE 0x3F90
#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
 
#define RLC_GFX_INDEX 0x3FC4
 
#define CONFIG_MEMSIZE 0x5428
 
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
217,12 → 208,6
#define SOFT_RESET_VGT (1 << 14)
#define SOFT_RESET_IA (1 << 15)
 
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
#define INSTANCE_BROADCAST_WRITES (1 << 30)
#define SE_BROADCAST_WRITES (1 << 31)
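/* Hedged sketch of the select/broadcast pattern behind GRBM_GFX_INDEX:
 * steer indexed access to one shader engine, then restore broadcast so
 * later writes reach all of them (se_num is a placeholder). */
WREG32(GRBM_GFX_INDEX, SE_INDEX(se_num) | INSTANCE_INDEX(0));
/* ... per-SE register reads/writes ... */
WREG32(GRBM_GFX_INDEX, SE_BROADCAST_WRITES | INSTANCE_BROADCAST_WRITES);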
 
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
234,12 → 219,6
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
#define CP_SEM_WAIT_TIMER 0x85BC
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_COHER_CNTL2 0x85E8
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
#define CP_BUSY_STAT 0x867C
#define CP_STAT 0x8680
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
415,12 → 394,6
#define CP_RB0_RPTR_ADDR 0xC10C
#define CP_RB0_RPTR_ADDR_HI 0xC110
#define CP_RB0_WPTR 0xC114
 
#define CP_INT_CNTL 0xC124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
# define TIME_STAMP_INT_ENABLE (1 << 26)
 
#define CP_RB1_BASE 0xC180
#define CP_RB1_CNTL 0xC184
#define CP_RB1_RPTR_ADDR 0xC188
438,10 → 411,6
#define CP_ME_RAM_DATA 0xC160
#define CP_DEBUG 0xC1FC
 
#define VGT_EVENT_INITIATOR 0x28a90
# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
 
/*
* PM4
*/
476,7 → 445,6
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
#define PACKET3_MODE_CONTROL 0x18
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
502,7 → 470,6
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
527,27 → 494,7
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
#define EVENT_TYPE(x) ((x) << 0)
#define EVENT_INDEX(x) ((x) << 8)
/* 0 - any non-TS event
* 1 - ZPASS_DONE
* 2 - SAMPLE_PIPELINESTAT
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
* 5 - TS events
*/
#define PACKET3_EVENT_WRITE_EOP 0x47
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
* 2 - send 64bit data
* 3 - send 64bit counter value
*/
#define INT_SEL(x) ((x) << 24)
/* 0 - none
* 1 - interrupt only (DATA_SEL = 0)
* 2 - interrupt when data write is confirmed
*/
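/* Hedged sketch: a typical end-of-pipe fence assembled from the
 * fields above; ring, addr and seq are placeholders. */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, seq);		/* DATA_SEL(1): low 32-bit data */
radeon_ring_write(ring, 0);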
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
586,7 → 533,6
#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
#define PACKET3_SET_RESOURCE_INDIRECT 0x74
#define PACKET3_SET_APPEND_CNT 0x75
#define PACKET3_ME_WRITE 0x7A
 
#endif
 
/drivers/video/drm/radeon/r600d.h
66,14 → 66,6
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
 
#define R_028808_CB_COLOR_CONTROL 0x28808
#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4)
#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7)
#define C_028808_SPECIAL_OP 0xFFFFFF8F
#define V_028808_SPECIAL_NORMAL 0x00
#define V_028808_SPECIAL_DISABLE 0x01
#define V_028808_SPECIAL_RESOLVE_BOX 0x07
 
#define CB_COLOR0_BASE 0x28040
#define CB_COLOR1_BASE 0x28044
#define CB_COLOR2_BASE 0x28048
86,40 → 78,7
 
#define CB_COLOR0_SIZE 0x28060
#define CB_COLOR0_VIEW 0x28080
#define R_028080_CB_COLOR0_VIEW 0x028080
#define S_028080_SLICE_START(x) (((x) & 0x7FF) << 0)
#define G_028080_SLICE_START(x) (((x) >> 0) & 0x7FF)
#define C_028080_SLICE_START 0xFFFFF800
#define S_028080_SLICE_MAX(x) (((x) & 0x7FF) << 13)
#define G_028080_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
#define C_028080_SLICE_MAX 0xFF001FFF
#define R_028084_CB_COLOR1_VIEW 0x028084
#define R_028088_CB_COLOR2_VIEW 0x028088
#define R_02808C_CB_COLOR3_VIEW 0x02808C
#define R_028090_CB_COLOR4_VIEW 0x028090
#define R_028094_CB_COLOR5_VIEW 0x028094
#define R_028098_CB_COLOR6_VIEW 0x028098
#define R_02809C_CB_COLOR7_VIEW 0x02809C
#define R_028100_CB_COLOR0_MASK 0x028100
#define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0)
#define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF)
#define C_028100_CMASK_BLOCK_MAX 0xFFFFF000
#define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12)
#define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF)
#define C_028100_FMASK_TILE_MAX 0x00000FFF
#define R_028104_CB_COLOR1_MASK 0x028104
#define R_028108_CB_COLOR2_MASK 0x028108
#define R_02810C_CB_COLOR3_MASK 0x02810C
#define R_028110_CB_COLOR4_MASK 0x028110
#define R_028114_CB_COLOR5_MASK 0x028114
#define R_028118_CB_COLOR6_MASK 0x028118
#define R_02811C_CB_COLOR7_MASK 0x02811C
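/* Hedged sketch of the S_/G_/C_ macro convention used throughout this
 * header: set, clear and read back one bitfield (max_slice is a
 * placeholder). */
u32 view = RREG32(R_028080_CB_COLOR0_VIEW);
view = (view & C_028080_SLICE_MAX) | S_028080_SLICE_MAX(max_slice);
WREG32(R_028080_CB_COLOR0_VIEW, view);
max_slice = G_028080_SLICE_MAX(view);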
#define CB_COLOR0_INFO 0x280a0
# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
# define CB_SOURCE_FORMAT(x) ((x) << 27)
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_TILE 0x280c0
#define CB_COLOR0_FRAG 0x280e0
#define CB_COLOR0_MASK 0x28100
175,9 → 134,6
 
#define CONFIG_MEMSIZE 0x5428
#define CONFIG_CNTL 0x5424
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
#define CP_BUSY_STAT 0x867C
#define CP_STAT 0x8680
#define CP_COHER_BASE 0x85F8
#define CP_DEBUG 0xC1FC
220,14 → 176,6
#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
#define DB_DEPTH_BASE 0x2800C
#define DB_HTILE_DATA_BASE 0x28014
#define DB_HTILE_SURFACE 0x28D24
#define S_028D24_HTILE_WIDTH(x) (((x) & 0x1) << 0)
#define G_028D24_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
#define C_028D24_HTILE_WIDTH 0xFFFFFFFE
#define S_028D24_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
#define G_028D24_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
#define C_028D24_HTILE_HEIGHT 0xFFFFFFFD
#define G_028D24_LINEAR(x) (((x) >> 2) & 0x1)
#define DB_WATERMARKS 0x9838
#define DEPTH_FREE(x) ((x) << 0)
#define DEPTH_FLUSH(x) ((x) << 5)
244,8 → 192,6
#define BACKEND_MAP(x) ((x) << 16)
 
#define GB_TILING_CONFIG 0x98F0
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
 
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
471,17 → 417,6
#define SQ_PGM_START_VS 0x28858
#define SQ_PGM_RESOURCES_VS 0x28868
#define SQ_PGM_CF_OFFSET_VS 0x288d0
 
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
#define SQ_VTX_CONSTANT_WORD2_0 0x30008
# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
# define SQ_VTXC_STRIDE(x) ((x) << 8)
# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
# define SQ_ENDIAN_NONE 0
# define SQ_ENDIAN_8IN16 1
# define SQ_ENDIAN_8IN32 2
#define SQ_VTX_CONSTANT_WORD3_0 0x3000c
#define SQ_VTX_CONSTANT_WORD6_0 0x38018
#define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30)
#define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3)
510,7 → 445,6
#define TC_L2_SIZE(x) ((x)<<5)
#define L2_DISABLE_LATE_HIT (1<<9)
 
#define VC_ENHANCE 0x9714
 
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
543,11 → 477,6
#define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC
#define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC
#define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C
#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
 
#define VGT_STRMOUT_EN 0x28AB0
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF
624,18 → 553,11
#define RLC_HB_WPTR 0x3f1c
#define RLC_HB_WPTR_LSB_ADDR 0x3f14
#define RLC_HB_WPTR_MSB_ADDR 0x3f18
#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38
#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40
#define RLC_MC_CNTL 0x3f44
#define RLC_UCODE_CNTL 0x3f48
#define RLC_UCODE_ADDR 0x3f2c
#define RLC_UCODE_DATA 0x3f30
 
/* new for TN */
#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
 
#define SRBM_SOFT_RESET 0xe60
# define SOFT_RESET_RLC (1 << 13)
 
855,239 → 777,6
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
 
/* Audio clocks */
#define DCCG_AUDIO_DTO0_PHASE 0x0514
#define DCCG_AUDIO_DTO0_MODULE 0x0518
#define DCCG_AUDIO_DTO0_LOAD 0x051c
# define DTO_LOAD (1 << 31)
#define DCCG_AUDIO_DTO0_CNTL 0x0520
 
#define DCCG_AUDIO_DTO1_PHASE 0x0524
#define DCCG_AUDIO_DTO1_MODULE 0x0528
#define DCCG_AUDIO_DTO1_LOAD 0x052c
#define DCCG_AUDIO_DTO1_CNTL 0x0530
 
#define DCCG_AUDIO_DTO_SELECT 0x0534
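/* Hedged sketch, assuming the usual phase/module programming for DTO0;
 * the values and the explicit load are illustrative, not the driver's
 * exact sequence. */
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO0_LOAD, DTO_LOAD);	/* latch the new ratio */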
 
/* digital blocks */
#define TMDSA_CNTL 0x7880
# define TMDSA_HDMI_EN (1 << 2)
#define LVTMA_CNTL 0x7a80
# define LVTMA_HDMI_EN (1 << 2)
#define DDIA_CNTL 0x7200
# define DDIA_HDMI_EN (1 << 2)
#define DIG0_CNTL 0x75a0
# define DIG_MODE(x) (((x) & 7) << 8)
# define DIG_MODE_DP 0
# define DIG_MODE_LVDS 1
# define DIG_MODE_TMDS_DVI 2
# define DIG_MODE_TMDS_HDMI 3
# define DIG_MODE_SDVO 4
#define DIG1_CNTL 0x79a0
 
/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
* instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
* different due to the new DIG blocks, but also have 2 instances.
* DCE 3.0 HDMI blocks are part of each DIG encoder.
*/
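/* Hedged sketch of the instance layout just described; the offsets are
 * the HDMI0/HDMI1/DCE3_HDMI1 defines below (dce3 and second_instance
 * are placeholders). */
u32 off = HDMI0_CONTROL;			/* 0x7400 everywhere */
if (second_instance)
	off = dce3 ? DCE3_HDMI1_CONTROL		/* 0x7800 on DCE 3.0 */
		   : HDMI1_CONTROL;		/* 0x7700, r6xx only */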
 
/* rs6xx/rs740/r6xx/dce3 */
#define HDMI0_CONTROL 0x7400
/* rs6xx/rs740/r6xx */
# define HDMI0_ENABLE (1 << 0)
# define HDMI0_STREAM(x) (((x) & 3) << 2)
# define HDMI0_STREAM_TMDSA 0
# define HDMI0_STREAM_LVTMA 1
# define HDMI0_STREAM_DVOA 2
# define HDMI0_STREAM_DDIA 3
/* rs6xx/r6xx/dce3 */
# define HDMI0_ERROR_ACK (1 << 8)
# define HDMI0_ERROR_MASK (1 << 9)
#define HDMI0_STATUS 0x7404
# define HDMI0_ACTIVE_AVMUTE (1 << 0)
# define HDMI0_AUDIO_ENABLE (1 << 4)
# define HDMI0_AZ_FORMAT_WTRIG (1 << 28)
# define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
# define HDMI0_AUDIO_TEST_EN (1 << 12)
# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
# define HDMI0_60958_CS_UPDATE (1 << 26)
# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
#define HDMI0_AUDIO_CRC_CONTROL 0x740c
# define HDMI0_AUDIO_CRC_EN (1 << 0)
#define HDMI0_VBI_PACKET_CONTROL 0x7410
# define HDMI0_NULL_SEND (1 << 0)
# define HDMI0_GC_SEND (1 << 4)
# define HDMI0_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
#define HDMI0_INFOFRAME_CONTROL0 0x7414
# define HDMI0_AVI_INFO_SEND (1 << 0)
# define HDMI0_AVI_INFO_CONT (1 << 1)
# define HDMI0_AUDIO_INFO_SEND (1 << 4)
# define HDMI0_AUDIO_INFO_CONT (1 << 5)
# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
# define HDMI0_MPEG_INFO_SEND (1 << 8)
# define HDMI0_MPEG_INFO_CONT (1 << 9)
# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
#define HDMI0_INFOFRAME_CONTROL1 0x7418
# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
# define HDMI0_GENERIC0_SEND (1 << 0)
# define HDMI0_GENERIC0_CONT (1 << 1)
# define HDMI0_GENERIC0_UPDATE (1 << 2)
# define HDMI0_GENERIC1_SEND (1 << 4)
# define HDMI0_GENERIC1_CONT (1 << 5)
# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
#define HDMI0_GC 0x7428
# define HDMI0_GC_AVMUTE (1 << 0)
#define HDMI0_AVI_INFO0 0x7454
# define HDMI0_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define HDMI0_AVI_INFO_S(x) (((x) & 3) << 8)
# define HDMI0_AVI_INFO_B(x) (((x) & 3) << 10)
# define HDMI0_AVI_INFO_A(x) (((x) & 1) << 12)
# define HDMI0_AVI_INFO_Y(x) (((x) & 3) << 13)
# define HDMI0_AVI_INFO_Y_RGB 0
# define HDMI0_AVI_INFO_Y_YCBCR422 1
# define HDMI0_AVI_INFO_Y_YCBCR444 2
# define HDMI0_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
# define HDMI0_AVI_INFO_R(x) (((x) & 0xf) << 16)
# define HDMI0_AVI_INFO_M(x) (((x) & 0x3) << 20)
# define HDMI0_AVI_INFO_C(x) (((x) & 0x3) << 22)
# define HDMI0_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
# define HDMI0_AVI_INFO_SC(x) (((x) & 0x3) << 24)
# define HDMI0_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
#define HDMI0_AVI_INFO1 0x7458
# define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
# define HDMI0_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
# define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
#define HDMI0_AVI_INFO2 0x745c
# define HDMI0_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
# define HDMI0_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
#define HDMI0_AVI_INFO3 0x7460
# define HDMI0_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
# define HDMI0_AVI_INFO_VERSION(x) (((x) & 3) << 24)
#define HDMI0_MPEG_INFO0 0x7464
# define HDMI0_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define HDMI0_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
# define HDMI0_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
# define HDMI0_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
#define HDMI0_MPEG_INFO1 0x7468
# define HDMI0_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
# define HDMI0_MPEG_INFO_MF(x) (((x) & 3) << 8)
# define HDMI0_MPEG_INFO_FR(x) (((x) & 1) << 12)
#define HDMI0_GENERIC0_HDR 0x746c
#define HDMI0_GENERIC0_0 0x7470
#define HDMI0_GENERIC0_1 0x7474
#define HDMI0_GENERIC0_2 0x7478
#define HDMI0_GENERIC0_3 0x747c
#define HDMI0_GENERIC0_4 0x7480
#define HDMI0_GENERIC0_5 0x7484
#define HDMI0_GENERIC0_6 0x7488
#define HDMI0_GENERIC1_HDR 0x748c
#define HDMI0_GENERIC1_0 0x7490
#define HDMI0_GENERIC1_1 0x7494
#define HDMI0_GENERIC1_2 0x7498
#define HDMI0_GENERIC1_3 0x749c
#define HDMI0_GENERIC1_4 0x74a0
#define HDMI0_GENERIC1_5 0x74a4
#define HDMI0_GENERIC1_6 0x74a8
#define HDMI0_ACR_32_0 0x74ac
# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
#define HDMI0_ACR_32_1 0x74b0
# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
#define HDMI0_ACR_44_0 0x74b4
# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
#define HDMI0_ACR_44_1 0x74b8
# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
#define HDMI0_ACR_48_0 0x74bc
# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
#define HDMI0_ACR_48_1 0x74c0
# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
#define HDMI0_ACR_STATUS_0 0x74c4
#define HDMI0_ACR_STATUS_1 0x74c8
#define HDMI0_AUDIO_INFO0 0x74cc
# define HDMI0_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define HDMI0_AUDIO_INFO_CC(x) (((x) & 7) << 8)
#define HDMI0_AUDIO_INFO1 0x74d0
# define HDMI0_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
# define HDMI0_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
# define HDMI0_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
# define HDMI0_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
#define HDMI0_60958_0 0x74d4
# define HDMI0_60958_CS_A(x) (((x) & 1) << 0)
# define HDMI0_60958_CS_B(x) (((x) & 1) << 1)
# define HDMI0_60958_CS_C(x) (((x) & 1) << 2)
# define HDMI0_60958_CS_D(x) (((x) & 3) << 3)
# define HDMI0_60958_CS_MODE(x) (((x) & 3) << 6)
# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
#define HDMI0_60958_1 0x74d8
# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
#define HDMI0_ACR_PACKET_CONTROL 0x74dc
# define HDMI0_ACR_SEND (1 << 0)
# define HDMI0_ACR_CONT (1 << 1)
# define HDMI0_ACR_SELECT(x) (((x) & 3) << 4)
# define HDMI0_ACR_HW 0
# define HDMI0_ACR_32 1
# define HDMI0_ACR_44 2
# define HDMI0_ACR_48 3
# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI0_ACR_AUTO_SEND (1 << 12)
#define HDMI0_RAMP_CONTROL0 0x74e0
# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL1 0x74e4
# define HDMI0_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL2 0x74e8
# define HDMI0_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL3 0x74ec
# define HDMI0_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
/* HDMI0_60958_2 is r7xx only */
#define HDMI0_60958_2 0x74f0
# define HDMI0_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
# define HDMI0_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
# define HDMI0_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
# define HDMI0_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
# define HDMI0_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
# define HDMI0_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
/* r6xx only; second instance starts at 0x7700 */
#define HDMI1_CONTROL 0x7700
#define HDMI1_STATUS 0x7704
#define HDMI1_AUDIO_PACKET_CONTROL 0x7708
/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
#define DCE3_HDMI1_CONTROL 0x7800
#define DCE3_HDMI1_STATUS 0x7804
#define DCE3_HDMI1_AUDIO_PACKET_CONTROL 0x7808
/* DCE3.2 (for interrupts) */
#define AFMT_STATUS 0x7600
# define AFMT_AUDIO_ENABLE (1 << 4)
# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
#define AFMT_AUDIO_PACKET_CONTROL 0x7604
# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
# define AFMT_AUDIO_TEST_EN (1 << 12)
# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
# define AFMT_60958_CS_UPDATE (1 << 26)
# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
 
/*
* PM4
*/
1126,11 → 815,7
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_INDIRECT_BUFFER_MP 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
# define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
1192,7 → 877,6
#define PACKET3_SET_CTL_CONST 0x6F
#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
#define PACKET3_SET_CTL_CONST_END 0x0003e200
#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
#define PACKET3_SURFACE_BASE_UPDATE 0x73
 
 
1422,9 → 1106,6
#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
#define C_0280A0_TILE_MODE 0xFFF3FFFF
#define V_0280A0_TILE_DISABLE 0
#define V_0280A0_CLEAR_ENABLE 1
#define V_0280A0_FRAG_ENABLE 2
#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
1671,12 → 1352,6
#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
#define C_038010_DST_SEL_W 0xF1FFFFFF
# define SQ_SEL_X 0
# define SQ_SEL_Y 1
# define SQ_SEL_Z 2
# define SQ_SEL_W 3
# define SQ_SEL_0 4
# define SQ_SEL_1 5
#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
#define C_038010_BASE_LEVEL 0x0FFFFFFF
/drivers/video/drm/radeon/rdisplay_kms.c
272,11 → 272,8
 
fb->width = reqmode->width;
fb->height = reqmode->height;
 
fb->pitches[0] = fb->pitches[1] = fb->pitches[2] =
fb->pitches[3] = radeon_align_pitch(dev->dev_private, reqmode->width, 32, false) * ((32 + 1) / 8);
fb->pitch = radeon_align_pitch(dev->dev_private, reqmode->width, 32, false) * ((32 + 1) / 8);
fb->bits_per_pixel = 32;
fb->depth = 24;
 
crtc->fb = fb;
crtc->enabled = true;
291,13 → 288,13
{
rdisplay->width = fb->width;
rdisplay->height = fb->height;
rdisplay->pitch = fb->pitches[0];
rdisplay->pitch = fb->pitch;
rdisplay->vrefresh = drm_mode_vrefresh(mode);
 
sysSetScreen(fb->width, fb->height, fb->pitches[0]);
sysSetScreen(fb->width, fb->height, fb->pitch);
 
dbgprintf("new mode %d x %d pitch %d\n",
fb->width, fb->height, fb->pitches[0]);
fb->width, fb->height, fb->pitch);
}
else
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
366,14 → 363,6
{
struct drm_device *dev;
 
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb;
struct drm_display_mode *native;
 
 
cursor_t *cursor;
bool retval = false;
u32_t ifl;
385,117 → 374,62
 
ENTER();
 
dev = rdev->ddev;
rdisplay = GetDisplay();
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
if( connector->status != connector_status_connected)
continue;
dev = rdisplay->ddev = rdev->ddev;
 
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if( encoder == NULL)
ifl = safe_cli();
{
dbgprintf("CONNECTOR %x ID: %d no active encoders\n",
connector, connector->base.id);
continue;
}
connector->encoder = encoder;
 
dbgprintf("CONNECTOR %x ID: %d status %d encoder %x\n crtc %x\n",
connector, connector->base.id,
connector->status, connector->encoder,
encoder->crtc);
 
crtc = encoder->crtc;
break;
};
 
if(connector == NULL)
list_for_each_entry(cursor, &rdisplay->cursors, list)
{
dbgprintf("No active connectors!\n");
return -1;
init_cursor(cursor);
};
 
{
struct drm_display_mode *tmp;
 
list_for_each_entry(tmp, &connector->modes, head) {
if (drm_mode_width(tmp) > 16384 ||
drm_mode_height(tmp) > 16384)
continue;
if (tmp->type & DRM_MODE_TYPE_PREFERRED)
{
native = tmp;
break;
};
}
}
safe_sti(ifl);
 
if( ASIC_IS_AVIVO(rdev) && native )
{
dbgprintf("native w %d h %d\n", native->hdisplay, native->vdisplay);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(connector->encoder);
radeon_encoder->rmx_type = RMX_FULL;
radeon_encoder->native_mode = *native;
};
 
 
if(crtc == NULL)
{
struct drm_crtc *tmp_crtc;
int crtc_mask = 1;
rfbdev = rdev->mode_info.rfbdev;
fb_helper = &rfbdev->helper;
 
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head)
{
if (encoder->possible_crtcs & crtc_mask)
{
crtc = tmp_crtc;
encoder->crtc = crtc;
break;
};
crtc_mask <<= 1;
};
};
 
if(crtc == NULL)
{
dbgprintf("No CRTC for encoder %d\n", encoder->base.id);
return -1;
};
// for (i = 0; i < fb_helper->crtc_count; i++)
// {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[0].mode_set;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
 
crtc = mode_set->crtc;
 
dbgprintf("[Select CRTC:%d]\n", crtc->base.id);
// if (!crtc->enabled)
// continue;
 
mode = mode_set->mode;
 
// drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
dbgprintf("crtc %d width %d height %d vrefresh %d\n",
crtc->base.id,
drm_mode_width(mode), drm_mode_height(mode),
drm_mode_vrefresh(mode));
// }
 
rdisplay = GetDisplay();
rdisplay->ddev = dev;
rdisplay->connector = connector;
rdisplay->crtc = crtc;
 
rdisplay->supported_modes = count_connector_modes(connector);
 
 
 
ifl = safe_cli();
rdisplay->connector = get_def_connector(dev);
if( rdisplay->connector == 0 )
{
list_for_each_entry(cursor, &rdisplay->cursors, list)
{
init_cursor(cursor);
dbgprintf("no active connectors\n");
return false;
};
 
};
safe_sti(ifl);
 
rdisplay->crtc = rdisplay->connector->encoder->crtc = crtc;
 
rdisplay->supported_modes = count_connector_modes(rdisplay->connector);
 
dbgprintf("current mode %d x %d x %d\n",
rdisplay->width, rdisplay->height, rdisplay->vrefresh);
dbgprintf("user mode mode %d x %d x %d\n",
usermode->width, usermode->height, usermode->freq);
 
 
if( (usermode->width != 0) &&
(usermode->height != 0) &&
( (usermode->width != rdisplay->width) ||
505,13 → 439,6
 
retval = set_mode(dev, rdisplay->connector, usermode, false);
}
else
{
usermode->width = rdisplay->width;
usermode->height = rdisplay->height;
usermode->freq = 60;
retval = set_mode(dev, rdisplay->connector, usermode, false);
};
 
ifl = safe_cli();
{
537,7 → 464,7
{
int err = -1;
 
// ENTER();
ENTER();
 
dbgprintf("mode %x count %d\n", mode, *count);
 
570,7 → 497,7
*count = i;
err = 0;
};
// LEAVE();
LEAVE();
return err;
}
 
578,7 → 505,7
{
int err = -1;
 
// ENTER();
ENTER();
 
dbgprintf("width %d height %d vrefresh %d\n",
mode->width, mode->height, mode->freq);
594,13 → 521,14
err = 0;
};
 
// LEAVE();
LEAVE();
return err;
};
 
 
int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
 
int radeonfb_create_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = rfbdev->rdev;
611,29 → 539,21
int ret;
int aligned_size, size;
int height = mode_cmd->height;
u32 bpp, depth;
 
static struct radeon_bo kos_bo;
static struct drm_mm_node vm_node;
 
 
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
fb_tiled) * ((bpp + 1) / 8);
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
 
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
size = mode_cmd->pitch * height;
aligned_size = ALIGN(size, PAGE_SIZE);
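/* Hedged worked example for a hypothetical 1024x768, 32 bpp mode:
 * pitch = 1024 * ((32 + 1) / 8) = 4096 bytes (already meeting the crtc
 * alignment), height = ALIGN(768, 8) = 768 on r6xx+, so
 * size = 4096 * 768 = 3145728 bytes, which is PAGE_SIZE aligned. */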
 
 
ret = drm_gem_object_init(rdev->ddev, &kos_bo.gem_base, aligned_size);
if (unlikely(ret)) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
return -ENOMEM;
return ret;
}
 
kos_bo.rdev = rdev;
649,13 → 569,10
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
 
// if (tiling_flags) {
// ret = radeon_bo_set_tiling_flags(rbo,
// tiling_flags | RADEON_TILING_SURFACE,
// mode_cmd->pitches[0]);
// if (ret)
// dev_err(rdev->dev, "FB failed to set tiling flags\n");
// }
if (tiling_flags) {
rbo->tiling_flags = tiling_flags | RADEON_TILING_SURFACE;
rbo->pitch = mode_cmd->pitch;
}
 
vm_node.size = 0xC00000 >> 12;
vm_node.start = 0;
667,6 → 584,8
rbo->kptr = (void*)0xFE000000;
rbo->pin_count = 1;
 
// if (fb_tiled)
// radeon_bo_check_tiling(rbo, 0, 0);
 
*gobj_p = gobj;
return 0;
673,3 → 592,5
}
 
 
 
 
/drivers/video/drm/radeon/firmware/TAHITI_ce.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/TAHITI_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/PITCAIRN_ce.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/PITCAIRN_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/PITCAIRN_mc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_ce.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/PITCAIRN_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/PITCAIRN_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_mc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/TAHITI_mc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/TAHITI_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/TAHITI_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Deleted: svn:mime-type
-application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/atombios.h
101,7 → 101,6
#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
#define ATOM_INIT (ATOM_DISABLE+7)
#define ATOM_GET_STATUS (ATOM_DISABLE+8)
 
#define ATOM_BLANKING 1
252,25 → 251,25
USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
USHORT EnableDispPowerGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT MemoryPLLInit; //Atomic Table, used only by Bios
USHORT AdjustDisplayPll; //Atomic Table, used by various SW componentes.
USHORT MemoryPLLInit;
USHORT AdjustDisplayPll; //only used by Bios
USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
USHORT GetConditionalGoldenSetting; //Only used by Bios
USHORT GetConditionalGoldenSetting; //only used by Bios
USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
USHORT PatchMCSetting; //only used by BIOS
USHORT MC_SEQ_Control; //only used by BIOS
USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
USHORT EnableScaler; //Atomic Table, used only by Bios
USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
283,7 → 282,7
USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
USHORT UpdateCRTC_DoubleBufferRegisters; //Atomic Table, used only by Bios
USHORT UpdateCRTC_DoubleBufferRegisters;
USHORT LUT_AutoFill; //Atomic Table, only used by Bios
USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
309,10 → 308,10
USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT ComputeMemoryClockParam; //Function Table,only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C"
USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon. Switch to use "ReadEDIDFromHWAssistedI2C"
USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT GetDispObjectInfo; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
319,12 → 318,10
USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
USHORT DPEncoderService; //Function Table,only used by Bios
USHORT GetVoltageInfo; //Function Table,only used by Bios since SI
}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
 
// For backward compatible
#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
#define DPTranslatorControl DIG2EncoderControl
#define UNIPHYTransmitterControl DIG1TransmitterControl
#define LVTMATransmitterControl DIG2TransmitterControl
#define SetCRTC_DPM_State GetConditionalGoldenSetting
331,15 → 328,8
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
#define HPDInterruptService ReadHWAssistedI2CStatus
#define EnableVGA_Access GetSCLKOverMCLKRatio
#define EnableYUV GetDispObjectInfo
#define DynamicClockGating EnableDispPowerGating
#define SetupHWAssistedI2CStatus ComputeMemoryClockParam
#define GetDispObjectInfo EnableYUV
 
#define TMDSAEncoderControl PatchMCSetting
#define LVDSEncoderControl MC_SEQ_Control
#define LCD1OutputControl HW_Misc_Operation
 
 
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
505,34 → 495,6
// ucInputFlag
#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
 
// use for ComputeMemoryClockParamTable
typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
{
union
{
ULONG ulClock;
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output:UPPER_WORD=FB_DIV_INTEGER, LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
};
UCHAR ucDllSpeed; //Output
UCHAR ucPostDiv; //Output
union{
UCHAR ucInputFlag; //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
UCHAR ucPllCntlFlag; //Output:
};
UCHAR ucBWCntl;
}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
 
// definition of ucInputFlag
#define MPLL_INPUT_FLAG_STROBE_MODE_EN 0x01
// definition of ucPllCntlFlag
#define MPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL 0x04
#define MPLL_CNTL_FLAG_QDR_ENABLE 0x08
#define MPLL_CNTL_FLAG_AD_HALF_RATE 0x10
 
//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04
 
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
600,16 → 562,6
#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
 
/****************************************************************************/
// Structure used by EnableDispPowerGatingTable.ctb
/****************************************************************************/
typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1
{
UCHAR ucDispPipeId; // ATOM_CRTC1, ATOM_CRTC2, ...
UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
UCHAR ucPadding[2];
}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
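/* Hedged sketch: gating one display pipe through this table; ATOM_CRTC1
 * and ATOM_ENABLE are the usual atombios constants, and executing the
 * EnableDispPowerGating command table is elided. */
ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
memset(&args, 0, sizeof(args));
args.ucDispPipeId = ATOM_CRTC1;
args.ucEnable = ATOM_ENABLE;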
 
/****************************************************************************/
// Structure used by EnableASIC_StaticPwrMgtTable.ctb
/****************************************************************************/
typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
855,7 → 807,6
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ 0x03
#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
863,7 → 814,6
#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER 0x60
 
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
{
1221,106 → 1171,6
#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
 
 
typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
{
#if ATOM_BIG_ENDIAN
UCHAR ucReservd1:1;
UCHAR ucHPDSel:3;
UCHAR ucPhyClkSrcId:2;
UCHAR ucCoherentMode:1;
UCHAR ucReserved:1;
#else
UCHAR ucReserved:1;
UCHAR ucCoherentMode:1;
UCHAR ucPhyClkSrcId:2;
UCHAR ucHPDSel:3;
UCHAR ucReservd1:1;
#endif
}ATOM_DIG_TRANSMITTER_CONFIG_V5;
 
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
{
USHORT usSymClock; // Encoder Clock in 10kHz,(DP mode)= linkclock/10, (TMDS/LVDS/HDMI)= pixel clock, (HDMI deep color), =pixel clock * deep_color_ratio
UCHAR ucPhyId; // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_xxx
UCHAR ucLaneNum; // indicate lane number 1-8
UCHAR ucConnObjId; // Connector Object Id defined in ObjectId.h
UCHAR ucDigMode; // indicate DIG mode
union{
ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
UCHAR ucConfig;
};
UCHAR ucDigEncoderSel; // indicate DIG front end encoder
UCHAR ucDPLaneSet;
UCHAR ucReserved;
UCHAR ucReserved1;
}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
 
//ucPhyId
#define ATOM_PHY_ID_UNIPHYA 0
#define ATOM_PHY_ID_UNIPHYB 1
#define ATOM_PHY_ID_UNIPHYC 2
#define ATOM_PHY_ID_UNIPHYD 3
#define ATOM_PHY_ID_UNIPHYE 4
#define ATOM_PHY_ID_UNIPHYF 5
#define ATOM_PHY_ID_UNIPHYG 6
 
// ucDigEncoderSel
#define ATOM_TRANMSITTER_V5__DIGA_SEL 0x01
#define ATOM_TRANMSITTER_V5__DIGB_SEL 0x02
#define ATOM_TRANMSITTER_V5__DIGC_SEL 0x04
#define ATOM_TRANMSITTER_V5__DIGD_SEL 0x08
#define ATOM_TRANMSITTER_V5__DIGE_SEL 0x10
#define ATOM_TRANMSITTER_V5__DIGF_SEL 0x20
#define ATOM_TRANMSITTER_V5__DIGG_SEL 0x40
 
// ucDigMode
#define ATOM_TRANSMITTER_DIGMODE_V5_DP 0
#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS 1
#define ATOM_TRANSMITTER_DIGMODE_V5_DVI 2
#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI 3
#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO 4
#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST 5
 
// ucDPLaneSet
#define DP_LANE_SET__0DB_0_4V 0x00
#define DP_LANE_SET__0DB_0_6V 0x01
#define DP_LANE_SET__0DB_0_8V 0x02
#define DP_LANE_SET__0DB_1_2V 0x03
#define DP_LANE_SET__3_5DB_0_4V 0x08
#define DP_LANE_SET__3_5DB_0_6V 0x09
#define DP_LANE_SET__3_5DB_0_8V 0x0a
#define DP_LANE_SET__6DB_0_4V 0x10
#define DP_LANE_SET__6DB_0_6V 0x11
#define DP_LANE_SET__9_5DB_0_4V 0x18
 
// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
// Bit1
#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT 0x02
 
// Bit3:2
#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 0x0c
#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT 0x02
 
#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL 0x00
#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL 0x04
#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL 0x08
#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT 0x0c
// Bit6:4
#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK 0x70
#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT 0x04
 
#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL 0x00
#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL 0x10
#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL 0x20
#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL 0x30
#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL 0x40
#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL 0x50
#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL 0x60
 
#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
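
/* A minimal sketch of how the encodings above compose; the helpers are
 * hypothetical, only the bit layout is taken from the defines. In
 * ucDPLaneSet the voltage-swing step sits in bits [2:0] and the
 * pre-emphasis step in bits [4:3], e.g. DP_LANE_SET__3_5DB_0_6V ==
 * (1 << 3) | 1 == 0x09. In ucConfig the ref clock source sits in bits
 * [3:2] and the HPD pin selection in bits [6:4]. */
static UCHAR example_dp_lane_set(UCHAR vs_level, UCHAR pe_level)
{
	return (UCHAR)((pe_level << 3) | (vs_level & 0x07));
}

static UCHAR example_transmitter_config_v5(UCHAR pll_id, UCHAR hpd_id)
{
	/* pll_id: 0=P1PLL, 1=P2PLL, 2=P0PLL; hpd_id: 0-based HPD pin */
	return ATOM_TRANSMITTER_CONFIG_V5_COHERENT |
	       ((pll_id << ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT) &
		ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK) |
	       (((hpd_id + 1) << ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT) &
		ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK);
}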
 
 
/****************************************************************************/
// Structures used by ExternalEncoderControlTable V1.3
// ASIC Families: Evergreen, Llano, NI
1943,7 → 1793,6
#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
#define ATOM_PPLL_SS_TYPE_V3_P0PLL ATOM_PPLL_SS_TYPE_V3_DCPLL
#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
2181,33 → 2030,6
USHORT usVoltageLevel; // real voltage level
}SET_VOLTAGE_PARAMETERS_V2;
 
 
typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
{
UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
UCHAR ucVoltageMode; // Indicate action: Set voltage level
USHORT usVoltageLevel; // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
}SET_VOLTAGE_PARAMETERS_V1_3;
 
//ucVoltageType
#define VOLTAGE_TYPE_VDDC 1
#define VOLTAGE_TYPE_MVDDC 2
#define VOLTAGE_TYPE_MVDDQ 3
#define VOLTAGE_TYPE_VDDCI 4
 
//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
#define ATOM_SET_VOLTAGE 0 //Set voltage Level
#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase
#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3
#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from virtual voltage ID
 
// define virtual voltage id in usVoltageLevel
#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
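
/* Sketch of a v1.3 SetVoltage call, assuming the radeon-style
 * atom_execute_table()/GetIndexIntoMasterTable() helpers; the structure
 * and defines are from above, everything else is illustrative. A virtual
 * voltage ID (0xff01..0xff04) in usVoltageLevel selects a
 * leakage-characterized level instead of an absolute millivolt value. */
static void example_set_vddc(struct atom_context *ctx, USHORT level)
{
	SET_VOLTAGE_PARAMETERS_V1_3 args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);

	memset(&args, 0, sizeof(args));
	args.ucVoltageType = VOLTAGE_TYPE_VDDC;
	args.ucVoltageMode = ATOM_SET_VOLTAGE;
	args.usVoltageLevel = cpu_to_le16(level);	/* mV or ATOM_VIRTUAL_VOLTAGE_IDx */

	atom_execute_table(ctx, index, (uint32_t *)&args);
}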
 
typedef struct _SET_VOLTAGE_PS_ALLOCATION
{
SET_VOLTAGE_PARAMETERS sASICSetVoltage;
2214,44 → 2036,6
WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
}SET_VOLTAGE_PS_ALLOCATION;
 
// Newly added from SI for GetVoltageInfoTable, input parameter structure
typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
{
UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
ULONG ulReserved;
}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
 
// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
typedef struct _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
{
ULONG ulVotlageGpioState;
ULONG ulVoltageGPioMask;
}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
 
// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
{
USHORT usVoltageLevel;
USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
ULONG ulReseved;
}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
 
 
// GetVoltageInfo v1.1 ucVoltageMode
#define ATOM_GET_VOLTAGE_VID 0x00
#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
// for SI, this state maps to the 0xff02 voltage state in the Power Play table, which is the power boost state
#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
 
// for SI, this state maps to the 0xff01 voltage state in the Power Play table, which is the performance state
#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
// undefined power state
#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
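
/* Hedged sketch of a GetVoltageInfo v1.1 query: fill the input block,
 * execute the table, then read the result back from the same buffer
 * (AtomBIOS tables return their output in place). Helper names are
 * assumptions; the structures are defined above. */
static USHORT example_get_leakage_vddc(struct atom_context *ctx,
				       USHORT leakage_id)
{
	union {
		GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1 in;
		GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1 out;
	} args;
	int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);

	memset(&args, 0, sizeof(args));
	args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
	args.in.ucVoltageMode = ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID;
	args.in.usVoltageLevel = cpu_to_le16(leakage_id);

	atom_execute_table(ctx, index, (uint32_t *)&args);
	return le16_to_cpu(args.out.usVoltageLevel);	/* voltage in mV */
}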
 
/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
2281,9 → 2065,9
USHORT MultimediaConfigInfo; // Only used by MM Lib, latest version 2.1, not configurable from Bios, need to include the table to build Bios
USHORT StandardVESA_Timing; // Only used by Bios
USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
USHORT PaletteData; // Only used by BIOS
USHORT DAC_Info; // Will be obsolete from R600
USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
USHORT DIGTransmitterInfo; // Internal used by VBIOS only version 3.1
USHORT TMDS_Info; // Will be obsolete from R600
USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
USHORT SupportedDevicesInfo; // Will be obsolete from R600
USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
2312,6 → 2096,9
USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
}ATOM_MASTER_LIST_OF_DATA_TABLES;
 
// For backward compatible
#define LVDS_Info LCD_Info
 
typedef struct _ATOM_MASTER_DATA_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
2318,10 → 2105,6
ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
}ATOM_MASTER_DATA_TABLE;
 
// For backward compatible
#define LVDS_Info LCD_Info
#define DAC_Info PaletteData
#define TMDS_Info DIGTransmitterInfo
 
/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
2388,9 → 2171,7
typedef struct _ATOM_FIRMWARE_CAPABILITY
{
#if ATOM_BIG_ENDIAN
USHORT Reserved:1;
USHORT SCL2Redefined:1;
USHORT PostWithoutModeSet:1;
USHORT Reserved:3;
USHORT HyperMemory_Size:4;
USHORT HyperMemory_Support:1;
USHORT PPMode_Assigned:1;
2412,9 → 2193,7
USHORT PPMode_Assigned:1;
USHORT HyperMemory_Support:1;
USHORT HyperMemory_Size:4;
USHORT PostWithoutModeSet:1;
USHORT SCL2Redefined:1;
USHORT Reserved:1;
USHORT Reserved:3;
#endif
}ATOM_FIRMWARE_CAPABILITY;
 
2639,8 → 2418,7
USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
ULONG ulReserved4; //Was ulAsicMaximumVoltage
ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
UCHAR ucRemoteDisplayConfig;
UCHAR ucReserved5[3]; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
2660,11 → 2438,6
 
#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
 
 
// definition of ucRemoteDisplayConfig
#define REMOTE_DISPLAY_DISABLE 0x00
#define REMOTE_DISPLAY_ENABLE 0x01
 
/****************************************************************************/
// Structures used in IntegratedSystemInfoTable
/****************************************************************************/
2887,9 → 2660,8
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI 5
 
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI // this define reflects the max defined CPU code
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this define reflects the max defined CPU code
 
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
2981,7 → 2753,6
#define ASIC_INT_DIG4_ENCODER_ID 0x0b
#define ASIC_INT_DIG5_ENCODER_ID 0x0c
#define ASIC_INT_DIG6_ENCODER_ID 0x0d
#define ASIC_INT_DIG7_ENCODER_ID 0x0e
 
//define Encoder attribute
#define ATOM_ANALOG_ENCODER 0
3455,8 → 3226,8
 
UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
 
UCHAR ucOffDelay_in4Ms;
UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
3463,15 → 3234,7
UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
UCHAR ucReserved1;
 
UCHAR ucDPCD_eDP_CONFIGURATION_CAP; // dpcd 0dh
UCHAR ucDPCD_MAX_LINK_RATE; // dpcd 01h
UCHAR ucDPCD_MAX_LANE_COUNT; // dpcd 02h
UCHAR ucDPCD_MAX_DOWNSPREAD; // dpcd 03h
 
USHORT usMaxPclkFreqInSingleLink; // Max PixelClock frequency in single link mode.
UCHAR uceDPToLVDSRxId;
UCHAR ucLcdReservd;
ULONG ulReserved[2];
ULONG ulReserved[4];
}ATOM_LCD_INFO_V13;
 
#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
3510,11 → 3273,6
//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change comparing to previous version
 
//uceDPToLVDSRxId
#define eDP_TO_LVDS_RX_DISABLE 0x00 // no eDP->LVDS translator chip
#define eDP_TO_LVDS_COMMON_ID 0x01 // common eDP->LVDS translator chip without AMD SW init
#define eDP_TO_LVDS_RT_ID 0x02 // RT translator which requires AMD SW init
 
typedef struct _ATOM_PATCH_RECORD_MODE
{
UCHAR ucRecordType;
3559,7 → 3317,6
#define LCD_CAP_RECORD_TYPE 3
#define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4
#define LCD_PANEL_RESOLUTION_RECORD_TYPE 5
#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE 6
#define ATOM_RECORD_END_TYPE 0xFF
 
/****************************Spread Spectrum Info Table Definitions **********************/
3771,7 → 3528,6
 
CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case.*/
 
/***********************************************************************************/
#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
 
typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
4062,17 → 3818,13
ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
};
UCHAR ucChPNInvert; // bit vector for up to 8 lanes, =0: P and N is not invert, =1 P and N is inverted
USHORT usCaps;
USHORT usReserved;
UCHAR ucReserved;
USHORT usReserved[2];
}EXT_DISPLAY_PATH;
#define NUMBER_OF_UCHAR_FOR_GUID 16
#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
 
//usCaps
#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
 
typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
{
ATOM_COMMON_TABLE_HEADER sHeader;
4080,9 → 3832,7
EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
UCHAR uc3DStereoPinId; // use for eDP panel
UCHAR ucRemoteDisplayConfig;
UCHAR uceDPToLVDSRxId;
UCHAR Reserved[4]; // for potential expansion
UCHAR Reserved [6]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
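
/* The ucChecksum comment above implies the byte sum of the whole table
 * wraps to zero. A minimal validation sketch, assuming the usual
 * ATOM_COMMON_TABLE_HEADER::usStructureSize covers the full table: */
static int example_ext_disp_table_valid(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *t)
{
	UCHAR *p = (UCHAR *)t;
	UCHAR sum = 0;
	int i, size = le16_to_cpu(t->sHeader.usStructureSize);

	for (i = 0; i < size; i++)
		sum += p[i];
	return sum == 0;	/* valid when the byte sum wraps to zero */
}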
 
//Related definitions, all records are different but they have a common header
4227,7 → 3977,6
#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
 
// Indexes to GPIO array in GLSync record
// GLSync record is for Frame Lock/Gen Lock feature.
#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
4235,9 → 3984,7
#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL 8
#define ATOM_GPIO_INDEX_GLSYNC_MAX 9
#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
 
typedef struct _ATOM_ENCODER_DVO_CF_RECORD
{
4247,8 → 3994,7
}ATOM_ENCODER_DVO_CF_RECORD;
 
// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder
#define ATOM_ENCODER_CAP_RECORD_HBR2_EN 0x02 // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
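
/* Quick consumption sketch: HBR2 should only be used when the encoder
 * advertises the capability and, per the newer revision of the defines
 * above, the qualification bit is also set. */
static int example_hbr2_allowed(USHORT encoder_caps)
{
	return (encoder_caps & ATOM_ENCODER_CAP_RECORD_HBR2) &&
	       (encoder_caps & ATOM_ENCODER_CAP_RECORD_HBR2_EN);
}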
 
typedef struct _ATOM_ENCODER_CAP_RECORD
{
4257,13 → 4003,11
USHORT usEncoderCap;
struct {
#if ATOM_BIG_ENDIAN
USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
#else
USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
#endif
};
};
4413,7 → 4157,6
#define VOLTAGE_CONTROL_ID_VT1556M 0x07
#define VOLTAGE_CONTROL_ID_CHL822x 0x08
#define VOLTAGE_CONTROL_ID_VT1586M 0x09
#define VOLTAGE_CONTROL_ID_UP1637 0x0A
 
typedef struct _ATOM_VOLTAGE_OBJECT
{
4450,69 → 4193,6
USHORT usVoltage;
}ATOM_LEAKID_VOLTAGE;
 
typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
UCHAR ucVoltageMode; //Indicate voltage control mode: Init/Set/Leakage/Set phase
USHORT usSize; //Size of Object
}ATOM_VOLTAGE_OBJECT_HEADER_V3;
 
typedef struct _VOLTAGE_LUT_ENTRY_V2
{
ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
USHORT usVoltageValue; // The corresponding Voltage Value, in mV
}VOLTAGE_LUT_ENTRY_V2;
 
typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
{
USHORT usVoltageLevel; // The voltage level in mV
USHORT usVoltageId; // The voltage ID programmed into the regulator
USHORT usLeakageId; // The corresponding leakage ID
}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
 
typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
UCHAR ucVoltageControlI2cLine;
UCHAR ucVoltageControlAddress;
UCHAR ucVoltageControlOffset;
ULONG ulReserved;
VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
}ATOM_I2C_VOLTAGE_OBJECT_V3;
 
typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucVoltageGpioCntlId; // default is 0 which indicate control through CG VID mode
UCHAR ucGpioEntryNum; // indicate the entry number of the Voltage/GPIO value lookup table
UCHAR ucPhaseDelay; // phase delay in unit of micro second
UCHAR ucReserved;
ULONG ulGpioMaskVal; // GPIO Mask value
VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
}ATOM_GPIO_VOLTAGE_OBJECT_V3;
 
typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucLeakageCntlId; // default is 0
UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
UCHAR ucReserved[2];
ULONG ulMaxVoltageLevel;
LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
 
typedef union _ATOM_VOLTAGE_OBJECT_V3{
ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
}ATOM_VOLTAGE_OBJECT_V3;
 
typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
{
ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_VOLTAGE_OBJECT_V3 asVoltageObj[3]; //Info for Voltage control
}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
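
/* Sketch of walking the variable-size voltage objects above: every entry
 * begins with ATOM_VOLTAGE_OBJECT_HEADER_V3 and usSize gives its total
 * length, so the array is traversed byte-wise until the end of the table.
 * Only the layouts come from this header; the function is illustrative. */
static ATOM_VOLTAGE_OBJECT_HEADER_V3 *
example_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info,
				 UCHAR voltage_type, UCHAR voltage_mode)
{
	UCHAR *start = (UCHAR *)info->asVoltageObj;
	UCHAR *end = (UCHAR *)info + le16_to_cpu(info->sHeader.usStructureSize);

	while (start < end) {
		ATOM_VOLTAGE_OBJECT_HEADER_V3 *hdr =
			(ATOM_VOLTAGE_OBJECT_HEADER_V3 *)start;

		if (hdr->ucVoltageType == voltage_type &&
		    hdr->ucVoltageMode == voltage_mode)
			return hdr;
		start += le16_to_cpu(hdr->usSize);
	}
	return NULL;
}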
 
typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
{
UCHAR ucProfileId;
4625,18 → 4305,7
USHORT usHDMISSpreadRateIn10Hz;
USHORT usDVISSPercentage;
USHORT usDVISSpreadRateIn10Hz;
ULONG SclkDpmBoostMargin;
ULONG SclkDpmThrottleMargin;
USHORT SclkDpmTdpLimitPG;
USHORT SclkDpmTdpLimitBoost;
ULONG ulBoostEngineCLock;
UCHAR ulBoostVid_2bit;
UCHAR EnableBoost;
USHORT GnbTdpLimit;
USHORT usMaxLVDSPclkFreqInSingleLink;
UCHAR ucLvdsMisc;
UCHAR ucLVDSReserved;
ULONG ulReserved3[15];
ULONG ulReserved3[21];
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V6;
 
4644,16 → 4313,9
#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
 
//ucLVDSMisc:
#define SYS_INFO_LVDSMISC__888_FPDI_MODE 0x01
#define SYS_INFO_LVDSMISC__DL_CH_SWAP 0x02
#define SYS_INFO_LVDSMISC__888_BPC 0x04
#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
// ulOtherDisplayMisc
#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
 
// not used any more
#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW 0x08
 
/**********************************************************************************************************************
ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
4722,213 → 4384,12
ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
sAvail_SCLK[5]: Arrays to provide availabe list of SLCK and corresponding voltage, order from low to high
sAvail_SCLK[5]: Arrays to provide available list of SLCK and corresponding voltage, order from low to high
ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usMaxLVDSPclkFreqInSingleLink: Max pixel clock of the LVDS panel in single link mode; if =0, VBIOS uses the default threshold, currently 85 MHz
ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
[bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
[bit2] LVDS 888bit per color mode =0: 666 bit per color =1: 888 bit per color
[bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
[bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted (active high) =1: inverted (active low)
**********************************************************************************************************************/
 
// this Table is used for the Llano/Ontario APU
typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
{
ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
ULONG ulPowerplayTable[128];
}ATOM_FUSION_SYSTEM_INFO_V1;
/**********************************************************************************************************************
ATOM_FUSION_SYSTEM_INFO_V1 Description
sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
ulPowerplayTable[128]: This 512-byte area is used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]
**********************************************************************************************************************/
 
// this IntegratedSystemInfoTable is used for the Trinity APU
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
{
ATOM_COMMON_TABLE_HEADER sHeader;
ULONG ulBootUpEngineClock;
ULONG ulDentistVCOFreq;
ULONG ulBootUpUMAClock;
ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
ULONG ulBootUpReqDisplayVector;
ULONG ulOtherDisplayMisc;
ULONG ulGPUCapInfo;
ULONG ulSB_MMIO_Base_Addr;
USHORT usRequestedPWMFreqInHz;
UCHAR ucHtcTmpLmt;
UCHAR ucHtcHystLmt;
ULONG ulMinEngineClock;
ULONG ulSystemConfig;
ULONG ulCPUCapInfo;
USHORT usNBP0Voltage;
USHORT usNBP1Voltage;
USHORT usBootUpNBVoltage;
USHORT usExtDispConnInfoOffset;
USHORT usPanelRefreshRateRange;
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
UCHAR strVBIOSMsg[40];
ULONG ulReserved[20];
ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
ULONG ulGMCRestoreResetTime;
ULONG ulMinimumNClk;
ULONG ulIdleNClk;
ULONG ulDDR_DLL_PowerUpTime;
ULONG ulDDR_PLL_PowerUpTime;
USHORT usPCIEClkSSPercentage;
USHORT usPCIEClkSSType;
USHORT usLvdsSSPercentage;
USHORT usLvdsSSpreadRateIn10Hz;
USHORT usHDMISSPercentage;
USHORT usHDMISSpreadRateIn10Hz;
USHORT usDVISSPercentage;
USHORT usDVISSpreadRateIn10Hz;
ULONG SclkDpmBoostMargin;
ULONG SclkDpmThrottleMargin;
USHORT SclkDpmTdpLimitPG;
USHORT SclkDpmTdpLimitBoost;
ULONG ulBoostEngineCLock;
UCHAR ulBoostVid_2bit;
UCHAR EnableBoost;
USHORT GnbTdpLimit;
USHORT usMaxLVDSPclkFreqInSingleLink;
UCHAR ucLvdsMisc;
UCHAR ucLVDSReserved;
UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
UCHAR ucLVDSOffToOnDelay_in4Ms;
UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
UCHAR ucLVDSReserved1;
ULONG ulLCDBitDepthControlVal;
ULONG ulNbpStateMemclkFreq[4];
USHORT usNBP2Voltage;
USHORT usNBP3Voltage;
ULONG ulNbpStateNClkFreq[4];
UCHAR ucNBDPMEnable;
UCHAR ucReserved[3];
UCHAR ucDPMState0VclkFid;
UCHAR ucDPMState0DclkFid;
UCHAR ucDPMState1VclkFid;
UCHAR ucDPMState1DclkFid;
UCHAR ucDPMState2VclkFid;
UCHAR ucDPMState2DclkFid;
UCHAR ucDPMState3VclkFid;
UCHAR ucDPMState3DclkFid;
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
 
// ulOtherDisplayMisc
#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT 0x02
#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT 0x04
#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT 0x08
 
// ulGPUCapInfo
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
 
/**********************************************************************************************************************
ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, VBIOS uses a pre-defined bootup engine clock
ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
sDISPCLK_Voltage: Report Display clock voltage requirement.
ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
ATOM_DEVICE_CRT1_SUPPORT 0x0001
ATOM_DEVICE_DFP1_SUPPORT 0x0008
ATOM_DEVICE_DFP6_SUPPORT 0x0040
ATOM_DEVICE_DFP2_SUPPORT 0x0080
ATOM_DEVICE_DFP3_SUPPORT 0x0200
ATOM_DEVICE_DFP4_SUPPORT 0x0400
ATOM_DEVICE_DFP5_SUPPORT 0x0800
ATOM_DEVICE_LCD1_SUPPORT 0x0002
ulOtherDisplayMisc: bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
=1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
=1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
=1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
bit[3]=0: VBIOS fast boot is disabled
=1: VBIOS fast boot is enabled. ( VBIOS skips display device detection on every set mode if the LCD panel is connected and the lid is open)
ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
=1: TMDS/HDMI Coherent Mode uses single PLL mode.
bit[1]=0: DP mode uses cascade PLL mode ( New for Trinity )
=1: DP mode uses single PLL mode
bit[3]=0: Enable AUX HW mode detection logic
=1: Disable AUX HW mode detection logic
ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
 
usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use;
VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
changing BL using the VBIOS function is functional in both driver and non-driver present environments;
and enabling VariBri under the driver environment from PP table is optional.
 
2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
that BL control from GPU is expected.
VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
it's per platform
and enabling VariBri under the driver environment from PP table is optional.
 
ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
Threshold on value to enter HTC_active state.
ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
To calculate the threshold-off value to exit the HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
=1: PCIE Power Gating Enabled
Bit[1]=0: DDR-DLL shut-down feature disabled.
1: DDR-DLL shut-down feature enabled.
Bit[2]=0: DDR-PLL Power down feature disabled.
1: DDR-PLL Power down feature enabled.
ulCPUCapInfo: TBD
usNBP0Voltage: VID for voltage on NB P0 State
usNBP1Voltage: VID for voltage on NB P1 State
usNBP2Voltage: VID for voltage on NB P2 State
usNBP3Voltage: VID for voltage on NB P3 State
usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
to indicate a range.
SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
ucUMAChannelNumber: System memory channel numbers.
ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
sAvail_SCLK[5]: Arrays to provide available list of SLCK and corresponding voltage, order from low to high
ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
4937,41 → 4398,6
usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usMaxLVDSPclkFreqInSingleLink: Max pixel clock of the LVDS panel in single link mode; if =0, VBIOS uses the default threshold, currently 85 MHz
ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
[bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
[bit2] LVDS 888bit per color mode =0: 666 bit per color =1: 888 bit per color
[bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
[bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted (active high) =1: inverted (active low)
ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active ( DE ).
=0 means use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DE ( data enable ) active to vary brightness enable signal active ( VARY_BL ).
=0 means use VBIOS default which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.

ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal ( VARY_BL ) off to data enable ( DE ) signal off.
=0 means use VBIOS default which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.

ucLVDSPwrOffSeqDEtoDIGON_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC ( DIGON ) off.
=0 means use VBIOS default delay which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.

ucLVDSOffToOnDelay_in4Ms: LVDS power sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
=0 means use VBIOS default delay which is 125 ( 500ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.

ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
=0 means use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.

ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
=0 means use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10kHz in different NB P-states.
 
**********************************************************************************************************************/
 
/**************************************************************************/
5033,7 → 4459,6
#define ASIC_INTERNAL_SS_ON_DP 7
#define ASIC_INTERNAL_SS_ON_DCPLL 8
#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
#define ASIC_INTERNAL_VCE_SS 10
 
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
{
5095,8 → 4520,8
#define ATOM_DOS_MODE_INFO_DEF 7
#define ATOM_I2C_CHANNEL_STATUS_DEF 8
#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
#define ATOM_INTERNAL_TIMER_DEF 10
 
 
// BIOS_0_SCRATCH Definition
#define ATOM_S0_CRT1_MONO 0x00000001L
#define ATOM_S0_CRT1_COLOR 0x00000002L
5223,7 → 4648,6
#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10
#define ATOM_S2_TMDS_COHERENT_MODEb3 0x10 // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
 
5614,23 → 5038,6
USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
 
typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
{
USHORT usHight; // Image Height
USHORT usWidth; // Image Width
USHORT usGraphPitch;
UCHAR ucColorDepth;
UCHAR ucPixelFormat;
UCHAR ucSurface; // Surface 1 or 2
UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
UCHAR ucModeType;
UCHAR ucReserved;
}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
 
// ucEnable
#define ATOM_GRAPH_CONTROL_SET_PITCH 0x0f
#define ATOM_GRAPH_CONTROL_SET_DISP_START 0x10
 
typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
{
ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
5650,58 → 5057,6
USHORT usY_Size;
}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
 
typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
{
union{
USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC
USHORT usSurface;
};
USHORT usY_Size;
USHORT usDispXStart;
USHORT usDispYStart;
}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2;
 
 
typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3
{
UCHAR ucLutId;
UCHAR ucAction;
USHORT usLutStartIndex;
USHORT usLutLength;
USHORT usLutOffsetInVram;
}PALETTE_DATA_CONTROL_PARAMETERS_V3;
 
// ucAction:
#define PALETTE_DATA_AUTO_FILL 1
#define PALETTE_DATA_READ 2
#define PALETTE_DATA_WRITE 3
 
 
typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
{
UCHAR ucInterruptId;
UCHAR ucServiceId;
UCHAR ucStatus;
UCHAR ucReserved;
}INTERRUPT_SERVICE_PARAMETER_V2;
 
// ucInterruptId
#define HDP1_INTERRUPT_ID 1
#define HDP2_INTERRUPT_ID 2
#define HDP3_INTERRUPT_ID 3
#define HDP4_INTERRUPT_ID 4
#define HDP5_INTERRUPT_ID 5
#define HDP6_INTERRUPT_ID 6
#define SW_INTERRUPT_ID 11
 
// ucServiceId
#define INTERRUPT_SERVICE_GEN_SW_INT 1
#define INTERRUPT_SERVICE_GET_STATUS 2
 
// ucStatus
#define INTERRUPT_STATUS__INT_TRIGGER 1
#define INTERRUPT_STATUS__HPD_HIGH 2
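
/* Sketch of raising a software interrupt through the service table above;
 * the execution plumbing is assumed, the parameter layout is from this
 * file. */
static void example_gen_sw_interrupt(struct atom_context *ctx, int index)
{
	INTERRUPT_SERVICE_PARAMETER_V2 args;

	memset(&args, 0, sizeof(args));
	args.ucInterruptId = SW_INTERRUPT_ID;
	args.ucServiceId = INTERRUPT_SERVICE_GEN_SW_INT;

	atom_execute_table(ctx, index, (uint32_t *)&args);
}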
 
typedef struct _INDIRECT_IO_ACCESS
{
ATOM_COMMON_TABLE_HEADER sHeader;
5834,7 → 5189,7
 
#define END_OF_REG_INDEX_BLOCK 0x0ffff
#define END_OF_REG_DATA_BLOCK 0x00000000
#define ATOM_INIT_REG_MASK_FLAG 0x80 //Not used in BIOS
#define ATOM_INIT_REG_MASK_FLAG 0x80
#define CLOCK_RANGE_HIGHEST 0x00ffffff
 
#define VALUE_DWORD SIZEOF ULONG
5874,7 → 5229,6
#define _128Mx8 0x51
#define _128Mx16 0x52
#define _256Mx8 0x61
#define _256Mx16 0x62
 
#define SAMSUNG 0x1
#define INFINEON 0x2
6231,7 → 5585,7
ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
USHORT usEnableChannels; // bit vector which indicate which channels are enabled
USHORT usReserved;
UCHAR ucExtMemoryID; // Current memory module ID
UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
UCHAR ucChannelNum; // Number of mem. channels supported in this module
6243,8 → 5597,7
UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
USHORT usSEQSettingOffset;
UCHAR ucReserved;
UCHAR ucReserved[3];
// Memory Module specific values
USHORT usEMRS2Value; // EMRS2/MR2 Value.
USHORT usEMRS3Value; // EMRS3/MR3 Value.
6297,8 → 5650,7
ATOM_COMMON_TABLE_HEADER sHeader;
USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
USHORT usPerBytePresetOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
USHORT usReserved[3];
USHORT usReserved[4];
UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODUE version
6583,52 → 5935,6
ASIC_ENCODER_INFO asEncoderInfo[1];
}ATOM_DISP_OUT_INFO_V2;
 
 
typedef struct _ATOM_DISP_CLOCK_ID {
UCHAR ucPpllId;
UCHAR ucPpllAttribute;
}ATOM_DISP_CLOCK_ID;
 
// ucPpllAttribute
#define CLOCK_SOURCE_SHAREABLE 0x01
#define CLOCK_SOURCE_DP_MODE 0x02
#define CLOCK_SOURCE_NONE_DP_MODE 0x04
 
//DispOutInfoTable
typedef struct _ASIC_TRANSMITTER_INFO_V2
{
USHORT usTransmitterObjId;
USHORT usDispClkIdOffset; // point to clock source id list supported by Encoder Object
UCHAR ucTransmitterCmdTblId;
UCHAR ucConfig;
UCHAR ucEncoderID; // available 1st encoder ( default )
UCHAR ucOptionEncoderID; // available 2nd encoder ( optional )
UCHAR uc2ndEncoderID;
UCHAR ucReserved;
}ASIC_TRANSMITTER_INFO_V2;
 
typedef struct _ATOM_DISP_OUT_INFO_V3
{
ATOM_COMMON_TABLE_HEADER sHeader;
USHORT ptrTransmitterInfo;
USHORT ptrEncoderInfo;
USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
USHORT usReserved;
UCHAR ucDCERevision;
UCHAR ucMaxDispEngineNum;
UCHAR ucMaxActiveDispEngineNum;
UCHAR ucMaxPPLLNum;
UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
UCHAR ucReserved[3];
ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
}ATOM_DISP_OUT_INFO_V3;
 
typedef enum CORE_REF_CLK_SOURCE{
CLOCK_SRC_XTALIN=0,
CLOCK_SRC_XO_IN=1,
CLOCK_SRC_XO_IN2=2,
}CORE_REF_CLK_SOURCE;
 
// DispDevicePriorityInfo
typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
{
6764,39 → 6070,6
#define HW_I2C_READ 0
#define I2C_2BYTE_ADDR 0x02
 
/****************************************************************************/
// Structures used by HW_Misc_OperationTable
/****************************************************************************/
typedef struct _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1
{
UCHAR ucCmd; // Input: To tell which action to take
UCHAR ucReserved[3];
ULONG ulReserved;
}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1;
 
typedef struct _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1
{
UCHAR ucReturnCode; // Output: Return value base on action was taken
UCHAR ucReserved[3];
ULONG ulReserved;
}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
 
// Actions code
#define ATOM_GET_SDI_SUPPORT 0xF0
 
// Return code
#define ATOM_UNKNOWN_CMD 0
#define ATOM_FEATURE_NOT_SUPPORTED 1
#define ATOM_FEATURE_SUPPORTED 2
 
typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
{
ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 sInput_Output;
PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS sReserved;
}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
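
/* Sketch of an SDI-support query through HW_Misc_OperationTable: the
 * input and output blocks overlay the same parameter space, as the
 * PS_ALLOCATION above suggests. The table index is taken as a parameter
 * here since the master-table entry name is not shown in this file. */
static int example_sdi_supported(struct atom_context *ctx, int index)
{
	union {
		ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 in;
		ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1 out;
	} args;

	memset(&args, 0, sizeof(args));
	args.in.ucCmd = ATOM_GET_SDI_SUPPORT;

	atom_execute_table(ctx, index, (uint32_t *)&args);
	return args.out.ucReturnCode == ATOM_FEATURE_SUPPORTED;
}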
 
/****************************************************************************/
 
typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
{
UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
6817,52 → 6090,6
#define SELECT_CRTC_PIXEL_RATE 7
#define SELECT_VGA_BLK 8
 
// DIGTransmitterInfoTable structure used to program UNIPHY settings
typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
ATOM_COMMON_TABLE_HEADER sHeader;
USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
}DIG_TRANSMITTER_INFO_HEADER_V3_1;
 
typedef struct _CLOCK_CONDITION_REGESTER_INFO{
USHORT usRegisterIndex;
UCHAR ucStartBit;
UCHAR ucEndBit;
}CLOCK_CONDITION_REGESTER_INFO;
 
typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
USHORT usMaxClockFreq;
UCHAR ucEncodeMode;
UCHAR ucPhySel;
ULONG ulAnalogSetting[1];
}CLOCK_CONDITION_SETTING_ENTRY;
 
typedef struct _CLOCK_CONDITION_SETTING_INFO{
USHORT usEntrySize;
CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
}CLOCK_CONDITION_SETTING_INFO;
 
typedef struct _PHY_CONDITION_REG_VAL{
ULONG ulCondition;
ULONG ulRegVal;
}PHY_CONDITION_REG_VAL;
 
typedef struct _PHY_CONDITION_REG_INFO{
USHORT usRegIndex;
USHORT usSize;
PHY_CONDITION_REG_VAL asRegVal[1];
}PHY_CONDITION_REG_INFO;
 
typedef struct _PHY_ANALOG_SETTING_INFO{
UCHAR ucEncodeMode;
UCHAR ucPhySel;
USHORT usSize;
PHY_CONDITION_REG_INFO asAnalogSetting[1];
}PHY_ANALOG_SETTING_INFO;
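
/* The usXxxOffset fields in DIG_TRANSMITTER_INFO_HEADER_V3_1 are byte
 * offsets; a hedged sketch of resolving one, assuming (as with other
 * AtomBIOS data tables) that they are relative to the start of the
 * table: */
static PHY_ANALOG_SETTING_INFO *
example_dp_vs_preemph_info(DIG_TRANSMITTER_INFO_HEADER_V3_1 *hdr)
{
	return (PHY_ANALOG_SETTING_INFO *)
		((UCHAR *)hdr + le16_to_cpu(hdr->usDPVsPreEmphSettingOffset));
}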
 
/****************************************************************************/
//Portion VI: Definitions for VBIOS MC scratch registers that the driver uses
/****************************************************************************/
7270,8 → 6497,6
#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
#define ATOM_PP_THERMALCONTROLLER_LM96163 17
 
// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
// We probably should reserve the bit 0x80 for this use.
7287,7 → 6512,6
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
 
 
typedef struct _ATOM_PPLIB_FANTABLE
{
UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
7300,12 → 6524,6
USHORT usPWMHigh; // The PWM value at THigh.
} ATOM_PPLIB_FANTABLE;
 
typedef struct _ATOM_PPLIB_FANTABLE2
{
ATOM_PPLIB_FANTABLE basicTable;
USHORT usTMax; // The max temperature
} ATOM_PPLIB_FANTABLE2;
 
typedef struct _ATOM_PPLIB_EXTENDEDHEADER
{
USHORT usSize;
7312,8 → 6530,6
ULONG ulMaxEngineClock; // For Overdrive.
ULONG ulMaxMemoryClock; // For Overdrive.
// Add extra system parameters here, always adjust size to include all fields.
USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
} ATOM_PPLIB_EXTENDEDHEADER;
 
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
7336,7 → 6552,6
#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state.
 
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
7395,8 → 6610,7
USHORT usVddciDependencyOnMCLKOffset;
USHORT usVddcDependencyOnMCLKOffset;
USHORT usMaxClockVoltageOnDCOffset;
USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
USHORT usReserved;
USHORT usReserved[2];
} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
7406,9 → 6620,8
ULONG ulNearTDPLimit;
ULONG ulSQRampingThreshold;
USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
USHORT usTDPODLimit;
USHORT usLoadLineSlope; // in milliOhms * 100
ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed.
ULONG ulReserved;
} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
 
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
7437,7 → 6650,6
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
 
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
7461,9 → 6673,7
 
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
 
#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
 
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
 
//memory related flags
7544,24 → 6754,6
 
} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
 
typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
{
USHORT usEngineClockLow;
UCHAR ucEngineClockHigh;
 
USHORT usMemoryClockLow;
UCHAR ucMemoryClockHigh;
 
USHORT usVDDC;
USHORT usVDDCI;
UCHAR ucPCIEGen;
UCHAR ucUnused1;
 
ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
 
} ATOM_PPLIB_SI_CLOCK_INFO;
 
 
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 
{
7574,7 → 6766,7
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
7596,8 → 6788,10
USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz
UCHAR ucEngineClockHigh; //clockfrequency >> 16.
UCHAR vddcIndex; //2-bit vddc index;
USHORT tdpLimit;
UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value
//please initialize to 0
UCHAR rsv;
//please initialize to 0
USHORT rsv1;
//please initialize to 0s
ULONG rsv2[2];
7619,7 → 6813,7
UCHAR clockInfoIndex[1];
} ATOM_PPLIB_STATE_V2;
 
typedef struct _StateArray{
typedef struct StateArray{
//how many states we have
UCHAR ucNumEntries;
7627,17 → 6821,18
}StateArray;
 
 
typedef struct _ClockInfoArray{
typedef struct ClockInfoArray{
//how many clock levels we have
UCHAR ucNumEntries;
//sizeof(ATOM_PPLIB_CLOCK_INFO)
//sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
UCHAR ucEntrySize;
UCHAR clockInfo[1];
//this is for Sumo
ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
}ClockInfoArray;
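
/* Both revisions of ClockInfoArray above are meant to be indexed through
 * ucEntrySize rather than a fixed element type (the newer side stores raw
 * bytes, the older side hardcodes the Sumo layout). A hedged sketch of
 * size-safe indexing; the caller casts to the ASIC-specific clock info: */
static void *example_clock_info(ClockInfoArray *array, UCHAR index)
{
	return (UCHAR *)array->clockInfo + (ULONG)index * array->ucEntrySize;
}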
 
typedef struct _NonClockInfoArray{
typedef struct NonClockInfoArray{
 
//how many non-clock levels we have. normally should be same as number of states
UCHAR ucNumEntries;
7676,124 → 6871,6
ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_Clock_Voltage_Limit_Table;
 
typedef struct _ATOM_PPLIB_CAC_Leakage_Record
{
USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations
ULONG ulLeakageValue;
}ATOM_PPLIB_CAC_Leakage_Record;
 
typedef struct _ATOM_PPLIB_CAC_Leakage_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_CAC_Leakage_Table;
 
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
{
USHORT usVoltage;
USHORT usSclkLow;
UCHAR ucSclkHigh;
USHORT usMclkLow;
UCHAR ucMclkHigh;
}ATOM_PPLIB_PhaseSheddingLimits_Record;
 
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_PhaseSheddingLimits_Table;
 
typedef struct _VCEClockInfo{
USHORT usEVClkLow;
UCHAR ucEVClkHigh;
USHORT usECClkLow;
UCHAR ucECClkHigh;
}VCEClockInfo;
 
typedef struct _VCEClockInfoArray{
UCHAR ucNumEntries;
VCEClockInfo entries[1];
}VCEClockInfoArray;
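
/* The 24-bit clocks in VCEClockInfo are split into a low word and a high
 * byte; reassembly is a shift-or (units are 10 kHz, as elsewhere in this
 * header). A minimal sketch: */
static ULONG example_vce_evclk(const VCEClockInfo *ci)
{
	return ((ULONG)ci->ucEVClkHigh << 16) | le16_to_cpu(ci->usEVClkLow);
}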
 
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
{
USHORT usVoltage;
UCHAR ucVCEClockInfoIndex;
}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
 
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
{
UCHAR numEntries;
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
 
typedef struct _ATOM_PPLIB_VCE_State_Record
{
UCHAR ucVCEClockInfoIndex;
UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
}ATOM_PPLIB_VCE_State_Record;
 
typedef struct _ATOM_PPLIB_VCE_State_Table
{
UCHAR numEntries;
ATOM_PPLIB_VCE_State_Record entries[1];
}ATOM_PPLIB_VCE_State_Table;
 
 
typedef struct _ATOM_PPLIB_VCE_Table
{
UCHAR revid;
// VCEClockInfoArray array;
// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
// ATOM_PPLIB_VCE_State_Table states;
}ATOM_PPLIB_VCE_Table;
 
 
typedef struct _UVDClockInfo{
USHORT usVClkLow;
UCHAR ucVClkHigh;
USHORT usDClkLow;
UCHAR ucDClkHigh;
}UVDClockInfo;
 
typedef struct _UVDClockInfoArray{
UCHAR ucNumEntries;
UVDClockInfo entries[1];
}UVDClockInfoArray;
 
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
{
USHORT usVoltage;
UCHAR ucUVDClockInfoIndex;
}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
 
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
{
UCHAR numEntries;
ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
 
typedef struct _ATOM_PPLIB_UVD_State_Record
{
UCHAR ucUVDClockInfoIndex;
UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
}ATOM_PPLIB_UVD_State_Record;
 
typedef struct _ATOM_PPLIB_UVD_State_Table
{
UCHAR numEntries;
ATOM_PPLIB_UVD_State_Record entries[1];
}ATOM_PPLIB_UVD_State_Table;
 
 
typedef struct _ATOM_PPLIB_UVD_Table
{
UCHAR revid;
// UVDClockInfoArray array;
// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
// ATOM_PPLIB_UVD_State_Table states;
}ATOM_PPLIB_UVD_Table;
 
/**************************************************************************/
 
 
7943,68 → 7020,4
 
#pragma pack() // BIOS data must use byte alignment
 
//
// AMD ACPI Table
//
#pragma pack(1)
 
typedef struct {
ULONG Signature;
ULONG TableLength; //Length
UCHAR Revision;
UCHAR Checksum;
UCHAR OemId[6];
UCHAR OemTableId[8]; //UINT64 OemTableId;
ULONG OemRevision;
ULONG CreatorId;
ULONG CreatorRevision;
} AMD_ACPI_DESCRIPTION_HEADER;
/*
//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
typedef struct {
UINT32 Signature; //0x0
UINT32 Length; //0x4
UINT8 Revision; //0x8
UINT8 Checksum; //0x9
UINT8 OemId[6]; //0xA
UINT64 OemTableId; //0x10
UINT32 OemRevision; //0x18
UINT32 CreatorId; //0x1C
UINT32 CreatorRevision; //0x20
}EFI_ACPI_DESCRIPTION_HEADER;
*/
typedef struct {
AMD_ACPI_DESCRIPTION_HEADER SHeader;
UCHAR TableUUID[16]; //0x24
ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
ULONG Reserved[4]; //0x3C
}UEFI_ACPI_VFCT;
 
typedef struct {
ULONG PCIBus; //0x4C
ULONG PCIDevice; //0x50
ULONG PCIFunction; //0x54
USHORT VendorID; //0x58
USHORT DeviceID; //0x5A
USHORT SSVID; //0x5C
USHORT SSID; //0x5E
ULONG Revision; //0x60
ULONG ImageLength; //0x64
}VFCT_IMAGE_HEADER;
 
 
typedef struct {
VFCT_IMAGE_HEADER VbiosHeader;
UCHAR VbiosContent[1];
}GOP_VBIOS_CONTENT;
 
typedef struct {
VFCT_IMAGE_HEADER Lib1Header;
UCHAR Lib1Content[1];
}GOP_LIB1_CONTENT;
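
/* A hedged sketch of locating the first VBIOS image inside the VFCT: the
 * table header is followed, at VBIOSImageOffset, by a GOP_VBIOS_CONTENT
 * block whose VFCT_IMAGE_HEADER identifies the PCI function it belongs
 * to. Matching against a specific device is omitted; everything beyond
 * the structures above is illustrative. */
static UCHAR *example_vfct_first_vbios(UEFI_ACPI_VFCT *vfct, ULONG *len)
{
	GOP_VBIOS_CONTENT *vbios =
		(GOP_VBIOS_CONTENT *)((UCHAR *)vfct + vfct->VBIOSImageOffset);

	*len = vbios->VbiosHeader.ImageLength;
	return vbios->VbiosContent;
}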
 
#pragma pack()
 
 
#endif /* _ATOMBIOS_H */
/drivers/video/drm/radeon/evergreen_blit_shaders.c
24,7 → 24,6
* Alex Deucher <alexander.deucher@amd.com>
*/
 
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/kernel.h>
 
/drivers/video/drm/radeon/radeon_atombios.c
23,8 → 23,8
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
 
#include "atom.h"
56,10 → 56,6
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
 
/* local */
static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
u16 voltage_id, u16 *voltage);
 
union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO info;
struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
66,25 → 62,32
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
 
static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
ATOM_GPIO_I2C_ASSIGMENT *gpio,
u8 index)
static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
{
/* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
if ((rdev->family == CHIP_R420) ||
(rdev->family == CHIP_R423) ||
(rdev->family == CHIP_RV410)) {
if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
gpio->ucClkMaskShift = 0x19;
gpio->ucDataMaskShift = 0x18;
}
}
struct atom_context *ctx = rdev->mode_info.atom_context;
ATOM_GPIO_I2C_ASSIGMENT *gpio;
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset, size;
int i, num_indices;
 
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
 
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
 
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
 
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((index == 7) &&
if ((i == 7) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
97,19 → 100,13
 
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((index == 4) &&
if ((i == 4) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
}
 
static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
struct radeon_i2c_bus_rec i2c;
 
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
141,14 → 138,15
 
if (i2c.mask_clk_reg)
i2c.valid = true;
else
i2c.valid = false;
break;
}
}
}
 
return i2c;
}
 
static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
void radeon_atombios_i2c_init(struct radeon_device *rdev)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
ATOM_GPIO_I2C_ASSIGMENT *gpio;
157,9 → 155,9
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset, size;
int i, num_indices;
char stmp[32];
 
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
 
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
169,44 → 167,60
 
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
i2c.valid = false;
 
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
if (gpio->sucI2cId.ucAccess == id) {
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
gpio->ucDataEnShift = 8;
gpio->ucDataY_Shift = 8;
gpio->ucDataA_Shift = 8;
}
}
}
 
return i2c;
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
 
void radeon_atombios_i2c_init(struct radeon_device *rdev)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
ATOM_GPIO_I2C_ASSIGMENT *gpio;
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset, size;
int i, num_indices;
char stmp[32];
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
i2c.en_data_mask = (1 << gpio->ucDataEnShift);
i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
 
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
i2c.hw_capable = true;
else
i2c.hw_capable = false;
 
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
if (gpio->sucI2cId.ucAccess == 0xa0)
i2c.mm_i2c = true;
else
i2c.mm_i2c = false;
 
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
i2c.i2c_id = gpio->sucI2cId.ucAccess;
 
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
 
if (i2c.valid) {
if (i2c.mask_clk_reg) {
i2c.valid = true;
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
214,7 → 228,7
}
}
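The bus-record construction in both revisions follows one AtomBIOS convention worth spelling out: register fields are stored as dword indices and pin positions as bit shifts. A minimal stand-alone sketch of the decode (example_decode_atom_gpio is a hypothetical helper, and the le16_to_cpu conversion is omitted):

#include <stdint.h>

/* hypothetical helper: turn one AtomBIOS GPIO field pair into an
 * MMIO byte offset plus a single-bit pin mask, as the loop above does */
static void example_decode_atom_gpio(uint16_t reg_index, uint8_t shift,
                                     uint32_t *reg, uint32_t *mask)
{
        *reg  = (uint32_t)reg_index * 4;  /* dword index -> byte offset */
        *mask = 1u << shift;              /* pin position -> bit mask */
}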
 
static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
257,9 → 271,7
 
memset(&hpd, 0, sizeof(struct radeon_hpd));
 
if (ASIC_IS_DCE6(rdev))
reg = SI_DC_GPIO_HPD_A;
else if (ASIC_IS_DCE4(rdev))
if (ASIC_IS_DCE4(rdev))
reg = EVERGREEN_DC_GPIO_HPD_A;
else
reg = AVIVO_DC_GPIO_HPD_A;
444,26 → 456,10
*/
if ((dev->pdev->device == 0x9498) &&
(dev->pdev->subsystem_vendor == 0x1682) &&
(dev->pdev->subsystem_device == 0x2452) &&
(i2c_bus->valid == false) &&
!(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
(dev->pdev->subsystem_device == 0x2452)) {
struct radeon_device *rdev = dev->dev_private;
*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
}
 
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
*line_mux = 0x3103;
} else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
}
 
 
return true;
}
 
1254,10 → 1250,6
if (rdev->clock.max_pixel_clock == 0)
rdev->clock.max_pixel_clock = 40000;
 
/* not technically a clock, but... */
rdev->mode_info.firmware_flags =
le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
 
return true;
}
 
1267,8 → 1259,6
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
};
 
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
1396,7 → 1386,7
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
u16 data_offset, size;
union igp_info *igp_info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
u8 frev, crev;
u16 percentage = 0, rate = 0;
 
1403,45 → 1393,22
/* get any igp specific overrides */
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
igp_info = (union igp_info *)
igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 6:
switch (id) {
case ASIC_INTERNAL_SS_ON_TMDS:
percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
percentage = le16_to_cpu(igp_info->usDVISSPercentage);
rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_HDMI:
percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_LVDS:
percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
break;
}
break;
case 7:
switch (id) {
case ASIC_INTERNAL_SS_ON_TMDS:
percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_HDMI:
percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_LVDS:
percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
break;
}
break;
default:
DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
break;
}
if (percentage)
ss->percentage = percentage;
if (rate)
1925,8 → 1892,6
"emc2103",
"Sumo",
"Northern Islands",
"Southern Islands",
"lm96163",
};
 
union power_info {
1943,7 → 1908,6
struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};
 
union pplib_power_state {
2009,8 → 1973,7
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
/* add the i2c bus for thermal/fan chip */
if ((power_info->info.ucOverdriveThermalController > 0) &&
(power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
if (power_info->info.ucOverdriveThermalController > 0) {
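/* the added ARRAY_SIZE() bound keeps a corrupt BIOS index from reading past thermal_controller_names */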
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
thermal_controller_names[power_info->info.ucOverdriveThermalController],
power_info->info.ucOverdriveControllerAddress >> 1);
2033,14 → 1996,10
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
if (!rdev->pm.power_state[state_index].clock_info)
return state_index;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
case 1:
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
2076,6 → 2035,7
state_index++;
break;
case 2:
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
2112,6 → 2072,7
state_index++;
break;
case 3:
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
2202,11 → 2163,6
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
2214,7 → 2170,7
(controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
DRM_INFO("Special thermal controller config\n");
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
} else {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
2229,12 → 2185,6
strlcpy(info.type, name, sizeof(info.type));
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
} else {
DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
controller->ucType,
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
}
}
}
2307,7 → 2257,7
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
if (ASIC_IS_DCE5(rdev)) {
/* NI chips post without MC ucode, so default clocks are strobe mode only */
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
2333,7 → 2283,6
union pplib_clock_info *clock_info)
{
u32 sclk, mclk;
u16 vddc;
 
if (rdev->flags & RADEON_IS_IGP) {
if (rdev->family >= CHIP_PALM) {
2345,19 → 2294,6
sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
}
} else if (ASIC_IS_DCE6(rdev)) {
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
sclk |= clock_info->si.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
mclk |= clock_info->si.ucMemoryClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
le16_to_cpu(clock_info->si.usVDDC);
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
le16_to_cpu(clock_info->si.usVDDCI);
} else if (ASIC_IS_DCE4(rdev)) {
sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
2385,18 → 2321,11
}
 
/* patch up vddc if necessary */
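/* the ATOM_VIRTUAL_VOLTAGE_IDx values are BIOS placeholders; the SetVoltage table query below resolves them to a real vddc */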
switch (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage) {
case ATOM_VIRTUAL_VOLTAGE_ID0:
case ATOM_VIRTUAL_VOLTAGE_ID1:
case ATOM_VIRTUAL_VOLTAGE_ID2:
case ATOM_VIRTUAL_VOLTAGE_ID3:
if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
&vddc) == 0)
if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
u16 vddc;
 
if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
break;
default:
break;
}
 
if (rdev->flags & RADEON_IS_IGP) {
2448,13 → 2377,6
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
((power_info->pplib.ucStateEntrySize - 1) ?
(power_info->pplib.ucStateEntrySize - 1) : 1),
GFP_KERNEL);
if (!rdev->pm.power_state[i].clock_info)
return state_index;
if (power_info->pplib.ucStateEntrySize - 1) {
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
2467,13 → 2389,6
if (valid)
mode_index++;
}
} else {
rdev->pm.power_state[state_index].clock_info[0].mclk =
rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[0].sclk =
rdev->clock.default_sclk;
mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
2506,9 → 2421,9
int i, j, non_clock_array_index, clock_array_index;
int state_index = 0, mode_index = 0;
union pplib_clock_info *clock_info;
struct _StateArray *state_array;
struct _ClockInfoArray *clock_info_array;
struct _NonClockInfoArray *non_clock_info_array;
struct StateArray *state_array;
struct ClockInfoArray *clock_info_array;
struct NonClockInfoArray *non_clock_info_array;
bool valid;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2521,13 → 2436,13
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
state_array = (struct _StateArray *)
state_array = (struct StateArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usStateArrayOffset));
clock_info_array = (struct _ClockInfoArray *)
clock_info_array = (struct ClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
non_clock_info_array = (struct _NonClockInfoArray *)
non_clock_info_array = (struct NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2541,13 → 2456,6
non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
(power_state->v2.ucNumDPMLevels ?
power_state->v2.ucNumDPMLevels : 1),
GFP_KERNEL);
if (!rdev->pm.power_state[i].clock_info)
return state_index;
if (power_state->v2.ucNumDPMLevels) {
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = power_state->v2.clockInfoIndex[j];
/* XXX this might be an inagua bug... */
2554,7 → 2462,7
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
&clock_info_array->clockInfo[clock_array_index];
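/* clockInfo is a byte array of ucEntrySize-byte records in the rev 2997 headers, so the index is scaled to a byte offset; the unscaled rev 2996 form misaddresses every entry past the first */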
valid = radeon_atombios_parse_pplib_clock_info(rdev,
state_index, mode_index,
clock_info);
2561,13 → 2469,6
if (valid)
mode_index++;
}
} else {
rdev->pm.power_state[state_index].clock_info[0].mclk =
rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[0].sclk =
rdev->clock.default_sclk;
mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
2623,9 → 2524,6
} else {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
if (rdev->pm.power_state[0].clock_info) {
/* add the default mode */
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
2641,17 → 2539,12
state_index++;
}
}
}
 
rdev->pm.num_power_states = state_index;
 
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
if (rdev->pm.default_power_state_index >= 0)
rdev->pm.current_vddc =
rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
else
rdev->pm.current_vddc = 0;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
}
 
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
2711,7 → 2604,6
struct _SET_VOLTAGE_PS_ALLOCATION alloc;
struct _SET_VOLTAGE_PARAMETERS v1;
struct _SET_VOLTAGE_PARAMETERS_V2 v2;
struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};
 
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
2738,11 → 2630,6
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
break;
case 3:
args.v3.ucVoltageType = voltage_type;
args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return;
2751,8 → 2638,8
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
u16 voltage_id, u16 *voltage)
int radeon_atom_get_max_vddc(struct radeon_device *rdev,
u16 *voltage)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
2773,15 → 2660,6
 
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
case 3:
args.v3.ucVoltageType = voltage_type;
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
3033,20 → 2911,6
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
}
}
if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("DFP6 connected\n");
bios_0_scratch |= ATOM_S0_DFP6;
bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
} else {
DRM_DEBUG_KMS("DFP6 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP6;
bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
}
}
 
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
3067,9 → 2931,6
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t bios_3_scratch;
 
if (ASIC_IS_DCE4(rdev))
return;
 
if (rdev->family >= CHIP_R600)
bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
else
3122,9 → 2983,6
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t bios_2_scratch;
 
if (ASIC_IS_DCE4(rdev))
return;
 
if (rdev->family >= CHIP_R600)
bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
else
/drivers/video/drm/radeon/radeon_fb.c
27,17 → 27,20
#include <linux/slab.h>
#include <linux/fb.h>
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"
 
#include <drm/drm_fb_helper.h>
#include "drm_fb_helper.h"
 
#include <drm_mm.h>
#include "radeon_object.h"
 
int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
int radeonfb_create_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object **gobj_p);
 
/* object hierarchy -
52,7 → 55,7
};
 
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
// .owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
// .fb_fillrect = cfb_fillrect,
95,7 → 98,7
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
111,21 → 114,15
if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
sizes->surface_bpp = 32;
 
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.depth = sizes->surface_depth;
 
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon object %d\n", ret);
return ret;
}
 
ret = radeonfb_create_object(rfbdev, &mode_cmd, &gobj);
rbo = gem_to_radeon_bo(gobj);
 
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
if (info == NULL) {
dbgprintf("framebuffer_alloc\n");
ret = -ENOMEM;
goto out_unref;
}
132,11 → 129,7
 
info->par = rfbdev;
 
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initalise framebuffer %d\n", ret);
goto out_unref;
}
radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 
fb = &rfbdev->rfb.base;
 
148,7 → 141,7
 
strcpy(info->fix.id, "radeondrmfb");
 
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
170,17 → 163,21
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
 
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
// info->pixmap.size = 64*1024;
// info->pixmap.buf_align = 8;
// info->pixmap.access_align = 32;
// info->pixmap.flags = FB_PIXMAP_SYSTEM;
// info->pixmap.scan_align = 1;
 
// if (info->screen_base == NULL) {
// ret = -ENOSPC;
// goto out_unref;
// }
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
DRM_INFO(" pitch is %d\n", fb->pitch);
 
 
LEAVE();
/drivers/video/drm/radeon/radeon_gem.c
25,14 → 25,14
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"
 
int radeon_gem_object_init(struct drm_gem_object *obj)
{
BUG();
 
/* we do nothing here */
return 0;
}
 
51,7 → 51,6
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
unsigned long max_size;
int r;
 
*obj = NULL;
59,26 → 58,11
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
 
/* maximum bo size is the minimum between visible vram and gtt size */
max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
if (size > max_size) {
printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
__func__, __LINE__, size >> 20, max_size >> 20);
return -ENOMEM;
}
 
retry:
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
if (r) {
if (r != -ERESTARTSYS) {
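/* on VRAM exhaustion, widen the domain to GTT and retry the allocation once */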
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
initial_domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
}
*obj = &robj->gem_base;
132,7 → 116,7
}
if (!domain) {
/* Do nothing */
printk(KERN_WARNING "Set domain without domain !\n");
printk(KERN_WARNING "Set domain withou domain !\n");
return 0;
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
167,7 → 151,6
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
unsigned i;
 
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
176,9 → 159,8
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
for(i = 0; i < RADEON_NUM_RINGS; ++i)
args->gart_size -= rdev->ring[i].ring_size;
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
RADEON_IB_POOL_SIZE*64*1024;
return 0;
}
 
207,7 → 189,6
uint32_t handle;
int r;
 
down_read(&rdev->exclusive_lock);
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
214,8 → 195,6
args->initial_domain, false,
false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
r = drm_gem_handle_create(filp, gobj, &handle);
222,12 → 201,9
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
args->handle = handle;
up_read(&rdev->exclusive_lock);
return 0;
}
 
236,7 → 212,6
{
/* transition the BO to a domain -
* just validate the BO into a certain domain */
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
244,12 → 219,10
 
/* for now if someone requests domain CPU -
* just make sure the buffer is finished with */
down_read(&rdev->exclusive_lock);
 
/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
up_read(&rdev->exclusive_lock);
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
257,8 → 230,6
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
drm_gem_object_unreference_unlocked(gobj);
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
 
290,7 → 261,6
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
316,7 → 286,6
break;
}
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
 
323,7 → 292,6
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
336,10 → 304,9
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(rdev, robj);
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
 
/drivers/video/drm/radeon/radeon_pm.c
20,18 → 20,20
* Authors: Rafał Miłecki <zajec5@gmail.com>
* Alex Deucher <alexdeucher@gmail.com>
*/
#include <drm/drmP.h>
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
 
#define DRM_DEBUG_DRIVER(fmt, args...)
 
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200
 
static const char *radeon_pm_state_type_name[5] = {
"",
"Default",
"Powersave",
"Battery",
"Balanced",
45,26 → 47,24
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
 
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance)
static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
 
#define ACPI_AC_CLASS "ac_adapter"
 
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
unsigned long val,
void *data)
{
int i;
int found_instance = -1;
struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
 
for (i = 0; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.power_state[i].type == ps_type) {
found_instance++;
if (found_instance == instance)
return i;
}
}
/* return default if no match */
return rdev->pm.default_power_state_index;
}
if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
if (power_supply_is_system_supplied() > 0)
DRM_DEBUG_DRIVER("pm: AC\n");
else
DRM_DEBUG_DRIVER("pm: DC\n");
 
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
75,6 → 75,10
}
}
 
return NOTIFY_OK;
}
#endif
 
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
switch (rdev->pm.profile) {
136,15 → 140,6
 
}
 
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
if (rdev->pm.active_crtcs) {
rdev->pm.vblank_sync = false;
// wait_event_timeout(
// rdev->irq.vblank_queue, rdev->pm.vblank_sync,
// msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
}
}
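/* the wait_event_timeout() vblank wait is stubbed out in this port; the flag is still cleared so the reclocking path keeps its upstream shape */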
 
static void radeon_set_power_state(struct radeon_device *rdev)
{
161,21 → 156,8
if (sclk > rdev->pm.default_sclk)
sclk = rdev->pm.default_sclk;
 
/* starting with BTC, there is one state that is used for both
* MH and SH. Difference is that we always use the high clock index for
* mclk.
*/
if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
(rdev->family >= CHIP_BARTS) &&
rdev->pm.active_crtc_count &&
((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
(rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
else
mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk;
 
if (mclk > rdev->pm.default_mclk)
mclk = rdev->pm.default_mclk;
 
183,7 → 165,7
if (sclk < rdev->pm.current_sclk)
misc_after = true;
 
radeon_sync_with_vblank(rdev);
// radeon_sync_with_vblank(rdev);
 
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (!radeon_pm_in_vbl(rdev))
206,7 → 188,7
}
 
/* set memory clock */
if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
236,16 → 218,27
return;
 
mutex_lock(&rdev->ddev->struct_mutex);
// down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
mutex_lock(&rdev->vram_mutex);
mutex_lock(&rdev->cp.mutex);
 
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
if (ring->ready)
radeon_fence_wait_empty_locked(rdev, i);
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
if (rdev->irq.installed) {
/* wait for GPU idle */
rdev->pm.gui_idle = false;
rdev->irq.gui_idle = true;
}
 
} else {
if (rdev->cp.ready) {
// struct radeon_fence *fence;
// radeon_ring_alloc(rdev, 64);
// radeon_fence_create(rdev, &fence);
// radeon_fence_emit(rdev, fence);
// radeon_ring_commit(rdev);
// radeon_fence_wait(fence, false);
// radeon_fence_unref(&fence);
}
}
radeon_unmap_vram_bos(rdev);
 
if (rdev->irq.installed) {
275,8 → 268,8
 
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 
mutex_unlock(&rdev->ring_lock);
// up_write(&rdev->pm.mclk_lock);
mutex_unlock(&rdev->cp.mutex);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
 
301,15 → 294,17
for (j = 0; j < power_state->num_clock_modes; j++) {
clock_info = &(power_state->clock_info[j]);
if (rdev->flags & RADEON_IS_IGP)
DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
j,
clock_info->sclk * 10);
clock_info->sclk * 10,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
else
DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
j,
clock_info->sclk * 10,
clock_info->mclk * 10,
clock_info->voltage.voltage);
clock_info->voltage.voltage,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
}
}
}
318,15 → 313,8
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int cp = rdev->pm.profile;
 
return snprintf(buf, PAGE_SIZE, "%s\n",
(cp == PM_PROFILE_AUTO) ? "auto" :
(cp == PM_PROFILE_LOW) ? "low" :
(cp == PM_PROFILE_MID) ? "mid" :
(cp == PM_PROFILE_HIGH) ? "high" : "default");
return snprintf(buf, PAGE_SIZE, "%s\n", "default");
}
 
static ssize_t radeon_set_pm_profile(struct device *dev,
338,26 → 326,11
struct radeon_device *rdev = ddev->dev_private;
 
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (strncmp("default", buf, strlen("default")) == 0)
 
rdev->pm.profile = PM_PROFILE_DEFAULT;
else if (strncmp("auto", buf, strlen("auto")) == 0)
rdev->pm.profile = PM_PROFILE_AUTO;
else if (strncmp("low", buf, strlen("low")) == 0)
rdev->pm.profile = PM_PROFILE_LOW;
else if (strncmp("mid", buf, strlen("mid")) == 0)
rdev->pm.profile = PM_PROFILE_MID;
else if (strncmp("high", buf, strlen("high")) == 0)
rdev->pm.profile = PM_PROFILE_HIGH;
else {
count = -EINVAL;
goto fail;
}
 
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
} else
count = -EINVAL;
 
fail:
mutex_unlock(&rdev->pm.mutex);
 
400,7 → 373,7
mutex_unlock(&rdev->pm.mutex);
// cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
} else {
count = -EINVAL;
DRM_ERROR("invalid power method!\n");
goto fail;
}
radeon_pm_compute_clocks(rdev);
408,9 → 381,6
return count;
}
 
//static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
//static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
 
static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
417,7 → 387,7
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int temp;
u32 temp;
 
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
430,12 → 400,6
case THERMAL_TYPE_NI:
temp = evergreen_get_temp(rdev);
break;
case THERMAL_TYPE_SUMO:
temp = sumo_get_temp(rdev);
break;
case THERMAL_TYPE_SI:
temp = si_get_temp(rdev);
break;
default:
temp = 0;
break;
/drivers/video/drm/radeon/ObjectID.h
85,7 → 85,6
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA 0x1F
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
 
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
 
388,10 → 387,6
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
 
#define ENCODER_VCE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
 
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
/drivers/video/drm/radeon/atom.h
26,7 → 26,7
#define ATOM_H
 
#include <linux/types.h>
#include <drm/drmP.h>
#include "drmP.h"
 
#define ATOM_BIOS_MAGIC 0xAA55
#define ATOM_ATI_MAGIC_PTR 0x30
44,7 → 44,6
#define ATOM_CMD_SETSCLK 0x0A
#define ATOM_CMD_SETMCLK 0x0B
#define ATOM_CMD_SETPCLK 0x0C
#define ATOM_CMD_SPDFANCNTL 0x39
 
#define ATOM_DATA_FWI_PTR 0xC
#define ATOM_DATA_IIO_PTR 0x32
138,7 → 137,6
int cs_equal, cs_above;
int io_mode;
uint32_t *scratch;
int scratch_size_bytes;
};
 
extern int atom_debug;
/drivers/video/drm/radeon/evergreen_reg.h
35,14 → 35,6
#define EVERGREEN_P1PLL_SS_CNTL 0x414
#define EVERGREEN_P2PLL_SS_CNTL 0x454
# define EVERGREEN_PxPLL_SS_EN (1 << 12)
 
#define EVERGREEN_AUDIO_PLL1_MUL 0x5b0
#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
 
#define EVERGREEN_AUDIO_ENABLE 0x5e78
#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
 
/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
#define EVERGREEN_GRPH_ENABLE 0x6800
#define EVERGREEN_GRPH_CONTROL 0x6804
50,17 → 42,6
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
# define EVERGREEN_ADDR_SURF_2_BANK 0
# define EVERGREEN_ADDR_SURF_4_BANK 1
# define EVERGREEN_ADDR_SURF_8_BANK 2
# define EVERGREEN_ADDR_SURF_16_BANK 3
# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
80,24 → 61,6
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
218,10 → 181,7
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
231,7 → 191,4
#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
 
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
 
#endif
/drivers/video/drm/radeon/r500_reg.h
351,8 → 351,6
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
#define AVIVO_D1CRTC_STATUS 0x609c
# define AVIVO_D1CRTC_V_BLANK (1 << 0)
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
575,7 → 573,6
 
#define AVIVO_TMDSA_CNTL 0x7880
# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
# define AVIVO_TMDSA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
636,7 → 633,6
 
#define AVIVO_LVTMA_CNTL 0x7a80
# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
# define AVIVO_LVTMA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
/drivers/video/drm/radeon/r600_hdmi.c
23,11 → 23,10
*
* Authors: Christian König
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "atom.h"
 
/*
53,7 → 52,19
AUDIO_STATUS_LEVEL = 0x80
};
 
static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
struct {
uint32_t Clock;
 
int N_32kHz;
int CTS_32kHz;
 
int N_44_1kHz;
int CTS_44_1kHz;
 
int N_48kHz;
int CTS_48kHz;
 
} r600_hdmi_ACR[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
{ 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
72,7 → 83,7
/*
* calculate CTS value if it's not found in the table
*/
static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
*CTS = clock * N / (128 * freq) * 1000;
80,24 → 91,6
N, *CTS, freq);
}
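For context, HDMI audio clock regeneration ties these values together as 128 * audio_rate = pixel_clock * N / CTS; the table carries exact pairs and this helper derives CTS when an entry is missing. A stand-alone sketch under the same units (clock in kHz, rate in Hz; example_calc_cts and the sample values are illustrative only):

#include <stdio.h>

/* same integer-math order as the fallback above: divide first,
 * then scale the kHz-based quotient back up by 1000 */
static int example_calc_cts(unsigned int clock_khz, int n, int freq)
{
        return clock_khz * n / (128 * freq) * 1000;
}

int main(void)
{
        /* 25.174 MHz pixel clock, 32 kHz audio: the table stores the
         * exact 28125, the integer fallback yields 28000 */
        printf("CTS = %d\n", example_calc_cts(25174, 4576, 32000));
        return 0;
}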
 
struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
{
struct radeon_hdmi_acr res;
u8 i;
 
for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
r600_hdmi_predefined_acr[i].clock != 0; i++)
;
res = r600_hdmi_predefined_acr[i];
 
/* In case some CTS are missing */
r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
 
return res;
}
 
/*
* update the N and CTS parameters for a given pixel clock rate
*/
105,19 → 98,30
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
int CTS;
int N;
int i;
 
WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
 
WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
CTS = r600_hdmi_ACR[i].CTS_32kHz;
N = r600_hdmi_ACR[i].N_32kHz;
r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
WREG32(offset+R600_HDMI_32kHz_N, N);
 
WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
N = r600_hdmi_ACR[i].N_44_1kHz;
r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
WREG32(offset+R600_HDMI_44_1kHz_N, N);
 
CTS = r600_hdmi_ACR[i].CTS_48kHz;
N = r600_hdmi_ACR[i].N_48kHz;
r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
WREG32(offset+R600_HDMI_48kHz_N, N);
}
 
/*
161,9 → 165,7
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
uint8_t frame[14];
 
194,21 → 196,14
frame[0xD] = (right_bar >> 8);
 
r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. The checksum function also seems to be OK; it works
* fine for the audio infoframe. However, the calculated value is always
* lower by 2 than what fglrx produces, which breaks display entirely on
* TVs that strictly check the checksum. Hack it manually here to
* work around this issue. */
frame[0x0] += 2;
 
WREG32(HDMI0_AVI_INFO0 + offset,
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
WREG32(HDMI0_AVI_INFO1 + offset,
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
WREG32(HDMI0_AVI_INFO2 + offset,
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(HDMI0_AVI_INFO3 + offset,
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
frame[0xC] | (frame[0xD] << 8));
}
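Both infoframe writers above depend on r600_hdmi_infoframe_checksum, whose contract comes from CEA-861: the byte-wise sum of the whole packet, header and checksum byte included, must be 0 modulo 256. A minimal sketch of that rule (illustrative, mirroring the (type, version, length, frame) call signature used above):

#include <stdint.h>

/* frame[0] receives the value that makes
 * type + version + length + frame[0..length] == 0 (mod 256) */
static void example_infoframe_checksum(uint8_t type, uint8_t version,
                                       uint8_t length, uint8_t *frame)
{
        unsigned int i, sum = type + version + length;

        for (i = 1; i <= length; i++)
                sum += frame[i];
        frame[0] = (uint8_t)(0x100 - (sum & 0xff));
}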
 
229,9 → 224,7
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
uint8_t frame[11];
 
249,9 → 242,9
 
r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
 
WREG32(HDMI0_AUDIO_INFO0 + offset,
WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
WREG32(HDMI0_AUDIO_INFO1 + offset,
WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
}
 
258,15 → 251,13
/*
* test if audio buffer is filled enough to start playing
*/
static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
}
 
/*
275,15 → 266,14
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int status, result;
 
if (!dig->afmt || !dig->afmt->enabled)
if (!radeon_encoder->hdmi_offset)
return 0;
 
status = r600_hdmi_is_audio_buffer_filled(encoder);
result = dig->afmt->last_buffer_filled_status != status;
dig->afmt->last_buffer_filled_status = status;
result = radeon_encoder->hdmi_buffer_status != status;
radeon_encoder->hdmi_buffer_status = status;
 
return result;
}
291,24 → 281,27
/*
* write the audio workaround status to the hardware
*/
static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
bool hdmi_audio_workaround = false; /* FIXME */
u32 value;
uint32_t offset = radeon_encoder->hdmi_offset;
 
if (!hdmi_audio_workaround ||
r600_hdmi_is_audio_buffer_filled(encoder))
value = 0; /* disable workaround */
else
value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
value, ~HDMI0_AUDIO_TEST_EN);
if (!offset)
return;
 
if (!radeon_encoder->hdmi_audio_workaround ||
r600_hdmi_is_audio_buffer_filled(encoder)) {
 
/* disable audio workaround */
WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
 
} else {
/* enable audio workaround */
WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
}
}
 
 
/*
318,74 → 311,41
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
if (ASIC_IS_DCE4(rdev))
return;
offset = dig->afmt->offset;
 
// r600_audio_set_clock(encoder, mode->clock);
if (!offset)
return;
 
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
HDMI0_NULL_SEND); /* send null packets when required */
r600_audio_set_clock(encoder, mode->clock);
 
WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
 
if (ASIC_IS_DCE32(rdev)) {
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
} else {
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
}
r600_hdmi_update_ACR(encoder, mode->clock);
 
WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
HDMI0_ACR_SOURCE); /* select SW CTS value */
WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
 
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
HDMI0_NULL_SEND | /* send null packets when required */
HDMI0_GC_SEND | /* send general control packets */
HDMI0_GC_CONT); /* send general control packets every frame */
WREG32(offset+R600_HDMI_VERSION, 0x202);
 
/* TODO: HDMI0_AUDIO_INFO_UPDATE */
WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
 
WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
 
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
 
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
 
r600_hdmi_update_ACR(encoder, mode->clock);
 
/* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
 
r600_hdmi_audio_workaround(encoder);
 
/* audio packets per line, does anyone know how to calc this? */
WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
}
 
#if 0
/*
* update settings with current parameters from audio engine
*/
393,83 → 353,127
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct r600_audio audio = r600_audio_status(rdev);
uint32_t offset;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
int channels = r600_audio_channels(rdev);
int rate = r600_audio_rate(rdev);
int bps = r600_audio_bits_per_sample(rdev);
uint8_t status_bits = r600_audio_status_bits(rdev);
uint8_t category_code = r600_audio_category_code(rdev);
 
uint32_t iec;
 
if (!dig->afmt || !dig->afmt->enabled)
if (!offset)
return;
offset = dig->afmt->offset;
 
DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
audio.channels, audio.rate, audio.bits_per_sample);
channels, rate, bps);
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
(int)audio.status_bits, (int)audio.category_code);
(int)status_bits, (int)category_code);
 
iec = 0;
if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
if (status_bits & AUDIO_STATUS_PROFESSIONAL)
iec |= 1 << 0;
if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
if (status_bits & AUDIO_STATUS_NONAUDIO)
iec |= 1 << 1;
if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
if (status_bits & AUDIO_STATUS_COPYRIGHT)
iec |= 1 << 2;
if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
if (status_bits & AUDIO_STATUS_EMPHASIS)
iec |= 1 << 3;
 
iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
iec |= category_code << 8;
 
switch (audio.rate) {
case 32000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
break;
case 44100:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
break;
case 48000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
break;
case 88200:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
break;
case 96000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
break;
case 176400:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
break;
case 192000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
break;
switch (rate) {
case 32000: iec |= 0x3 << 24; break;
case 44100: iec |= 0x0 << 24; break;
case 88200: iec |= 0x8 << 24; break;
case 176400: iec |= 0xc << 24; break;
case 48000: iec |= 0x2 << 24; break;
case 96000: iec |= 0xa << 24; break;
case 192000: iec |= 0xe << 24; break;
}
 
WREG32(HDMI0_60958_0 + offset, iec);
WREG32(offset+R600_HDMI_IEC60958_1, iec);
 
iec = 0;
switch (audio.bits_per_sample) {
case 16:
iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
switch (bps) {
case 16: iec |= 0x2; break;
case 20: iec |= 0x3; break;
case 24: iec |= 0xb; break;
}
if (status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
 
WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
 
/* 0x021 or 0x031 sets the audio frame length */
WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
 
r600_hdmi_audio_workaround(encoder);
}
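Both revisions encode the same IEC 60958 channel-status word here; the 4-bit sampling-frequency code lands in bits 24..27 (the HDMI0_60958_CS_SAMPLING_FREQUENCY() macro in the new revision encodes the same field the old shifts build by hand). The mapping, pulled out as an illustrative table (the 0x1 fallback is an assumption, taken as the spec's "not indicated" code):

#include <stdint.h>

/* sampling-frequency codes as used by both switch statements above */
static uint32_t example_iec60958_rate_code(int rate)
{
        switch (rate) {
        case 32000:  return 0x3;
        case 44100:  return 0x0;
        case 48000:  return 0x2;
        case 88200:  return 0x8;
        case 96000:  return 0xa;
        case 176400: return 0xc;
        case 192000: return 0xe;
        default:     return 0x1; /* assumed: IEC 60958 "not indicated" */
        }
}

/* usage mirrors the driver: iec |= example_iec60958_rate_code(rate) << 24; */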
 
static int r600_hdmi_find_free_block(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
bool free_blocks[3] = { true, true, true };
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->hdmi_offset) {
case R600_HDMI_BLOCK1:
free_blocks[0] = false;
break;
case 20:
iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
case R600_HDMI_BLOCK2:
free_blocks[1] = false;
break;
case 24:
iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
case R600_HDMI_BLOCK3:
free_blocks[2] = false;
break;
}
if (audio.status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
}
 
r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
0);
if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
rdev->family == CHIP_RS740) {
return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
} else if (rdev->family >= CHIP_R600) {
if (free_blocks[0])
return R600_HDMI_BLOCK1;
else if (free_blocks[1])
return R600_HDMI_BLOCK2;
}
return 0;
}
 
r600_hdmi_audio_workaround(encoder);
static void r600_hdmi_assign_block(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
if (!dig) {
dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
return;
}
#endif
 
if (ASIC_IS_DCE4(rdev)) {
/* TODO */
} else if (ASIC_IS_DCE3(rdev)) {
radeon_encoder->hdmi_offset = dig->dig_encoder ?
R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
if (ASIC_IS_DCE32(rdev))
radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
} else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
}
}
 
/*
* enable the HDMI engine
*/
478,56 → 482,56
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
u32 hdmi;
 
if (ASIC_IS_DCE6(rdev))
if (ASIC_IS_DCE4(rdev))
return;
 
/* Silent, r600_hdmi_enable will raise WARN for us */
if (dig->afmt->enabled)
if (!radeon_encoder->hdmi_offset) {
r600_hdmi_assign_block(encoder);
if (!radeon_encoder->hdmi_offset) {
dev_warn(rdev->dev, "Could not find HDMI block for "
"0x%x encoder\n", radeon_encoder->encoder_id);
return;
offset = dig->afmt->offset;
}
}
 
/* Older chipsets require setting HDMI and routing manually */
if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
offset = radeon_encoder->hdmi_offset;
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
~AVIVO_TMDSA_CNTL_HDMI_EN);
hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0x101);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
~AVIVO_LVTMA_CNTL_HDMI_EN);
hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0x105);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
break;
default:
dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
radeon_encoder->encoder_id);
dev_err(rdev->dev, "Unknown HDMI output type\n");
break;
}
WREG32(HDMI0_CONTROL + offset, hdmi);
}
#if 0
if (rdev->irq.installed
&& rdev->family != CHIP_RS600
&& rdev->family != CHIP_RS690
&& rdev->family != CHIP_RS740) {
 
if (rdev->irq.installed) {
/* if irq is available use it */
// radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
radeon_irq_set(rdev);
 
r600_audio_disable_polling(encoder);
} else {
/* if not fallback to polling */
r600_audio_enable_polling(encoder);
}
 
dig->afmt->enabled = true;
 
#endif
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
offset, radeon_encoder->encoder_id);
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
}
 
/*
538,50 → 542,38
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
 
if (ASIC_IS_DCE6(rdev))
if (ASIC_IS_DCE4(rdev))
return;
 
	/* Called for ATOM_ENCODER_MODE_HDMI only */
	if (!dig || !dig->afmt) {
		WARN_ON(1);
		return;
	}
	if (!dig->afmt->enabled)
		return;
	offset = dig->afmt->offset;
	offset = radeon_encoder->hdmi_offset;
	if (!offset) {
		dev_err(rdev->dev, "Disabling not enabled HDMI\n");
		return;
	}
 
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
offset, radeon_encoder->encoder_id);
 
/* disable irq */
// radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
 
/* Older chipsets not handled by AtomBIOS */
if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0,
~AVIVO_TMDSA_CNTL_HDMI_EN);
WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, 0,
~AVIVO_LVTMA_CNTL_HDMI_EN);
WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
break;
default:
dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
radeon_encoder->encoder_id);
dev_err(rdev->dev, "Unknown HDMI output type\n");
break;
}
WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
}
 
dig->afmt->enabled = false;
radeon_encoder->hdmi_offset = 0;
radeon_encoder->hdmi_config_offset = 0;
}
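
A caller-side sketch of how the two entry points above pair up. This is illustrative only: it assumes the functions are exported as r600_hdmi_enable() and r600_hdmi_disable(), which matches the surrounding driver but is not visible in this hunk.

static void radeon_hdmi_set_state(struct drm_encoder *encoder, bool enable)
{
	/* hypothetical helper, not part of this tree */
	if (enable)
		r600_hdmi_enable(encoder);	/* assigns a block and routes the stream */
	else
		r600_hdmi_disable(encoder);	/* unroutes the stream and frees the block */
}
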
/drivers/video/drm/radeon/r600_reg.h
156,10 → 156,45
#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
#define R600_AUDIO_STATUS_BITS 0x73d8
 
#define DCE2_HDMI_OFFSET0 (0x7400 - 0x7400)
#define DCE2_HDMI_OFFSET1 (0x7700 - 0x7400)
/* DCE3.2 second instance starts at 0x7800 */
#define DCE3_HDMI_OFFSET0 (0x7400 - 0x7400)
#define DCE3_HDMI_OFFSET1 (0x7800 - 0x7400)
/* HDMI base register addresses */
#define R600_HDMI_BLOCK1 0x7400
#define R600_HDMI_BLOCK2 0x7700
#define R600_HDMI_BLOCK3 0x7800
 
/* HDMI registers */
#define R600_HDMI_ENABLE 0x00
#define R600_HDMI_STATUS 0x04
# define R600_HDMI_INT_PENDING (1 << 29)
#define R600_HDMI_CNTL 0x08
# define R600_HDMI_INT_EN (1 << 28)
# define R600_HDMI_INT_ACK (1 << 29)
#define R600_HDMI_UNKNOWN_0 0x0C
#define R600_HDMI_AUDIOCNTL 0x10
#define R600_HDMI_VIDEOCNTL 0x14
#define R600_HDMI_VERSION 0x18
#define R600_HDMI_UNKNOWN_1 0x28
#define R600_HDMI_VIDEOINFOFRAME_0 0x54
#define R600_HDMI_VIDEOINFOFRAME_1 0x58
#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
#define R600_HDMI_VIDEOINFOFRAME_3 0x60
#define R600_HDMI_32kHz_CTS 0xac
#define R600_HDMI_32kHz_N 0xb0
#define R600_HDMI_44_1kHz_CTS 0xb4
#define R600_HDMI_44_1kHz_N 0xb8
#define R600_HDMI_48kHz_CTS 0xbc
#define R600_HDMI_48kHz_N 0xc0
#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
#define R600_HDMI_IEC60958_1 0xd4
#define R600_HDMI_IEC60958_2 0xd8
#define R600_HDMI_UNKNOWN_2 0xdc
#define R600_HDMI_AUDIO_DEBUG_0 0xe0
#define R600_HDMI_AUDIO_DEBUG_1 0xe4
#define R600_HDMI_AUDIO_DEBUG_2 0xe8
#define R600_HDMI_AUDIO_DEBUG_3 0xec
 
/* HDMI additional config base register addresses */
#define R600_HDMI_CONFIG1 0x7600
#define R600_HDMI_CONFIG2 0x7a00
 
#endif
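
The R600_HDMI_* registers above are offsets relative to a block base, so an access always combines one of R600_HDMI_BLOCK1/2/3 (or radeon_encoder->hdmi_offset) with a per-block register offset. A minimal sketch, assuming only the WREG32() helper used throughout this driver; the function name is hypothetical:

static void r600_hdmi_write_acr_48khz(struct radeon_device *rdev,
				      uint32_t block, uint32_t cts, uint32_t n)
{
	/* absolute address = block base + per-block register offset */
	WREG32(block + R600_HDMI_48kHz_CTS, cts);
	WREG32(block + R600_HDMI_48kHz_N, n);
}

A call such as r600_hdmi_write_acr_48khz(rdev, R600_HDMI_BLOCK1, cts, n) then lands at 0x7400 + 0xbc and 0x7400 + 0xc0.
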
/drivers/video/drm/radeon/radeon_asic.h
42,12 → 42,6
void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
 
void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
 
 
/*
* r100,rv100,rs100,rv200,rs200
*/
64,20 → 58,17
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
bool r100_gpu_is_lockup(struct radeon_device *rdev);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_cp_commit(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
84,8 → 75,8
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
unsigned num_pages,
struct radeon_fence *fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
92,7 → 83,7
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_ring_test(struct radeon_device *rdev);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
109,7 → 100,13
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
struct radeon_cp *cp);
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
struct radeon_cp *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_init(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
139,8 → 136,6
extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* r200,rv250,rs300,rv280
148,8 → 143,8
extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
unsigned num_pages,
struct radeon_fence *fence);
void r200_set_safe_registers(struct radeon_device *rdev);
 
/*
159,8 → 154,9
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
177,7 → 173,6
extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* r420,r423,rv410
208,7 → 203,6
void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* rs600.
239,9 → 233,8
extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
 
/*
* rs690,rs740
*/
255,21 → 248,23
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2);
extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* rv515
*/
struct rv515_mc_save {
u32 d1vga_control;
u32 d2vga_control;
u32 vga_render_control;
u32 vga_hdp_control;
u32 d1crtc_control;
u32 d2crtc_control;
};
 
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
void rv515_ring_start(struct radeon_device *rdev);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
280,14 → 275,13
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
void rv515_clock_startup(struct radeon_device *rdev);
void rv515_debugfs(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
 
/*
* r520,rv530,rv560,rv570,r580
*/
int r520_init(struct radeon_device *rdev);
int r520_resume(struct radeon_device *rdev);
int r520_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
299,6 → 293,7
void r600_vga_set_state(struct radeon_device *rdev, bool state);
int r600_wb_init(struct radeon_device *rdev);
void r600_wb_fini(struct radeon_device *rdev);
void r600_cp_commit(struct radeon_device *rdev);
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
305,22 → 300,18
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
int r600_ib_test(struct radeon_device *rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
unsigned num_pages, struct radeon_fence *fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
337,7 → 328,7
bool r600_card_posted(struct radeon_device *rdev);
void r600_cp_stop(struct radeon_device *rdev);
int r600_cp_start(struct radeon_device *rdev);
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
int r600_cp_resume(struct radeon_device *rdev);
void r600_cp_fini(struct radeon_device *rdev);
int r600_count_pipe_bits(uint32_t val);
358,23 → 349,26
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
int r600_audio_tmds_index(struct drm_encoder *encoder);
void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
struct r600_audio r600_audio_status(struct radeon_device *rdev);
int r600_audio_channels(struct radeon_device *rdev);
int r600_audio_bits_per_sample(struct radeon_device *rdev);
int r600_audio_rate(struct radeon_device *rdev);
uint8_t r600_audio_status_bits(struct radeon_device *rdev);
uint8_t r600_audio_category_code(struct radeon_device *rdev);
void r600_audio_schedule_polling(struct radeon_device *rdev);
void r600_audio_enable_polling(struct drm_encoder *encoder);
void r600_audio_disable_polling(struct drm_encoder *encoder);
void r600_audio_fini(struct radeon_device *rdev);
void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
struct radeon_fence **fence, struct radeon_sa_bo **vb,
struct radeon_semaphore **sem);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void r600_kms_blit_copy(struct radeon_device *rdev,
			u64 src_gpu_addr, u64 dst_gpu_addr,
			unsigned num_gpu_pages,
			struct radeon_sa_bo *vb);
			int size_bytes);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
 
/*
* rv770,rv730,rv710,rv740
393,20 → 387,23
* evergreen
*/
struct evergreen_mc_save {
u32 vga_control[6];
u32 vga_render_control;
u32 vga_hdp_control;
bool crtc_enabled[RADEON_MAX_CRTCS];
u32 crtc_control[6];
};
 
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
419,62 → 416,28
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void btc_pm_init_profile(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
void evergreen_blit_fini(struct radeon_device *rdev);
/* evergreen blit */
int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void evergreen_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes);
 
/*
* cayman
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
bool cayman_gpu_is_lockup(struct radeon_device *rdev);
int cayman_asic_reset(struct radeon_device *rdev);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
 
/*
* si
*/
void si_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void si_pcie_gart_tlb_flush(struct radeon_device *rdev);
int si_init(struct radeon_device *rdev);
void si_fini(struct radeon_device *rdev);
int si_suspend(struct radeon_device *rdev);
int si_resume(struct radeon_device *rdev);
bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int si_asic_reset(struct radeon_device *rdev);
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
uint64_t si_get_gpu_clock(struct radeon_device *rdev);
 
#endif
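
A usage sketch for the blit-copy path declared above, following the Rev 2997 signature in which the fence is returned through a pointer. This is a hedged example, not code from this tree; it assumes the radeon_fence_wait()/radeon_fence_unref() helpers declared elsewhere in the driver.

static int example_vram_copy(struct radeon_device *rdev,
			     uint64_t src, uint64_t dst, unsigned num_gpu_pages)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = r600_copy_blit(rdev, src, dst, num_gpu_pages, &fence);
	if (r)
		return r;
	r = radeon_fence_wait(fence, false);	/* block until the GPU copy lands */
	radeon_fence_unref(&fence);
	return r;
}
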
/drivers/video/drm/radeon/radeon_family.h
87,10 → 87,6
CHIP_TURKS,
CHIP_CAICOS,
CHIP_CAYMAN,
CHIP_ARUBA,
CHIP_TAHITI,
CHIP_PITCAIRN,
CHIP_VERDE,
CHIP_LAST,
};
 
/drivers/video/drm/radeon/radeon_legacy_crtc.c
206,6 → 206,11
WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
}
 
void radeon_restore_common_regs(struct drm_device *dev)
{
/* don't need this yet */
}
 
static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
290,7 → 295,7
return 1;
}
 
static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
414,7 → 419,6
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
/* Only 27 bit offset for legacy CRTC */
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
433,7 → 437,7
 
crtc_offset_cntl = 0;
 
pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
((target_fb->bits_per_pixel * 8) - 1)) /
(target_fb->bits_per_pixel * 8));
984,9 → 988,15
}
 
static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);
 
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
1019,11 → 1029,9
 
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
 
radeon_crtc->in_mode_set = true;
/*
* The hardware wedges sometimes if you reconfigure one CRTC
* whilst another is running (see fdo bug #24611).
1034,7 → 1042,6
 
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
 
1045,7 → 1052,6
if (crtci->enabled)
radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
}
radeon_crtc->in_mode_set = false;
}
 
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
/drivers/video/drm/radeon/radeon_legacy_encoders.c
23,15 → 23,11
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "atom.h"
#include <linux/backlight.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
 
static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
{
88,7 → 84,7
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
mdelay(1);
udelay(1000);
 
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
101,7 → 97,7
(backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
if (is_mac)
lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
mdelay(panel_pwr_delay);
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
break;
case DRM_MODE_DPMS_STANDBY:
118,10 → 114,10
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
}
mdelay(panel_pwr_delay);
udelay(panel_pwr_delay * 1000);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
mdelay(panel_pwr_delay);
udelay(panel_pwr_delay * 1000);
break;
}
 
244,7 → 240,7
}
 
static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
269,49 → 265,15
.disable = radeon_legacy_encoder_disable,
};
 
u8
radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
{
	struct drm_device *dev = radeon_encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	u8 backlight_level;

	backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
			   RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;

	return backlight_level;
}

void
radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
{
	struct drm_device *dev = radeon_encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	int dpms_mode = DRM_MODE_DPMS_ON;

	if (radeon_encoder->enc_priv) {
		if (rdev->is_atom_bios) {
			struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
			if (lvds->backlight_level > 0)
				dpms_mode = lvds->dpms_mode;
			else
				dpms_mode = DRM_MODE_DPMS_OFF;
			lvds->backlight_level = level;
		} else {
			struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
			if (lvds->backlight_level > 0)
				dpms_mode = lvds->dpms_mode;
			else
				dpms_mode = DRM_MODE_DPMS_OFF;
			lvds->backlight_level = level;
		}
	}

	radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

#define MAX_RADEON_LEVEL 0xFF

struct radeon_backlight_privdata {
	struct radeon_encoder *encoder;
	uint8_t negative;
};
 
static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
320,13 → 282,13
/* Convert brightness to hardware level */
if (bd->props.brightness < 0)
level = 0;
else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
level = RADEON_MAX_BL_LEVEL;
else if (bd->props.brightness > MAX_RADEON_LEVEL)
level = MAX_RADEON_LEVEL;
else
level = bd->props.brightness;
 
if (pdata->negative)
level = RADEON_MAX_BL_LEVEL - level;
level = MAX_RADEON_LEVEL - level;
 
return level;
}
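
A worked example of the clamp-and-invert above (both revisions cap the level at 0xFF): brightness 300 clamps to 255; with pdata->negative set, meaning the panel is wired active-low, brightness 200 programs hardware level 255 - 200 = 55, and the get_brightness path further below inverts it back to 200.
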
335,10 → 297,27
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
struct radeon_encoder *radeon_encoder = pdata->encoder;
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
int dpms_mode = DRM_MODE_DPMS_ON;
 
radeon_legacy_set_backlight_level(radeon_encoder,
radeon_legacy_lvds_level(bd));
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
dpms_mode = lvds->dpms_mode;
lvds->backlight_level = radeon_legacy_lvds_level(bd);
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
dpms_mode = lvds->dpms_mode;
lvds->backlight_level = radeon_legacy_lvds_level(bd);
}
}
 
if (bd->props.brightness > 0)
radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
else
radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF);
 
return 0;
}
 
353,7 → 332,7
backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
 
return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level;
}
 
static const struct backlight_ops radeon_backlight_ops = {
370,7 → 349,6
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
uint8_t backlight_level;
char bl_name[16];
 
if (!radeon_encoder->enc_priv)
return;
387,12 → 365,9
goto error;
}
 
memset(&props, 0, sizeof(props));
props.max_brightness = RADEON_MAX_BL_LEVEL;
props.max_brightness = MAX_RADEON_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
bd = backlight_device_register(bl_name, &drm_connector->kdev,
bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
469,7 → 444,7
}
 
if (bd) {
struct radeon_backlight_privdata *pdata;
struct radeon_legacy_backlight_privdata *pdata;
 
pdata = bl_get_data(bd);
backlight_device_unregister(bd);
677,7 → 652,7
 
WREG32(RADEON_DAC_MACRO_CNTL, tmp);
 
mdelay(2);
udelay(2000);
 
if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
found = connector_status_connected;
994,7 → 969,11
static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
/* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
if (tmds) {
if (tmds->i2c_bus)
radeon_i2c_destroy(tmds->i2c_bus);
}
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
1516,7 → 1495,7
tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
WREG32(RADEON_DAC_CNTL2, tmp);
 
mdelay(10);
udelay(10000);
 
if (ASIC_IS_R300(rdev)) {
if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
/drivers/video/drm/radeon/radeon_legacy_tv.c
1,5 → 1,5
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon.h"
 
/*
864,7 → 864,7
*v_sync_strt_wid = tmp;
}
 
static int get_post_div(int value)
static inline int get_post_div(int value)
{
int post_div;
switch (value) {
/drivers/video/drm/radeon/radeon_object.h
31,8 → 31,6
#include <drm/radeon_drm.h>
#include "radeon.h"
 
struct sg_table;
 
/**
* radeon_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
54,7 → 52,16
return 0;
}
 
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
/**
* radeon_bo_reserve - reserve bo
* @bo: bo structure
* @no_wait: don't sleep while trying to reserve (return -EBUSY)
*
* Returns:
* -EBUSY: buffer is busy and @no_wait is true
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
 
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
85,16 → 92,6
return !!atomic_read(&bo->tbo.reserved);
}
 
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
{
return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
}
 
static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
{
return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
}
 
/**
* radeon_bo_mmap_offset - return mmap offset of bo
* @bo: radeon object for which we query the offset
109,20 → 106,32
return bo->tbo.addr_space_offset;
}
 
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
bool no_wait);
static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
bool no_wait)
{
int r;
 
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0))
return r;
// spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
if (bo->tbo.sync_obj)
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
// spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
}
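
A short usage sketch for the helper above (the function itself is hypothetical, using only radeon_bo_wait() as shown): block until all GPU activity on a BO has finished before touching it from the CPU.

static int example_sync_bo(struct radeon_bo *bo)
{
	u32 mem_type;
	int r;

	r = radeon_bo_wait(bo, &mem_type, false);	/* blocking wait */
	if (r)
		return r;
	DRM_INFO("bo idle in mem_type %u\n", mem_type);
	return 0;
}
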
 
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain,
struct sg_table *sg,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
u64 max_offset, u64 *gpu_addr);
extern int radeon_bo_unpin(struct radeon_bo *bo);
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
143,41 → 152,4
struct ttm_mem_reg *mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 
/*
* sub allocation
*/
 
static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
{
return sa_bo->manager->gpu_addr + sa_bo->soffset;
}
 
static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
{
return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}
 
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
struct radeon_sa_bo **sa_bo,
struct radeon_fence *fence);
#if defined(CONFIG_DEBUG_FS)
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
struct seq_file *m);
#endif
 
 
#endif
/drivers/video/drm/radeon/rs600d.h
485,20 → 485,6
#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
#define R_007404_HDMI0_STATUS 0x007404
#define S_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) & 0x1) << 28)
#define G_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) >> 28) & 0x1)
#define C_007404_HDMI0_AZ_FORMAT_WTRIG 0xEFFFFFFF
#define S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) & 0x1) << 29)
#define G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) >> 29) & 0x1)
#define C_007404_HDMI0_AZ_FORMAT_WTRIG_INT 0xDFFFFFFF
#define R_007408_HDMI0_AUDIO_PACKET_CONTROL 0x007408
#define S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) & 0x1) << 28)
#define G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) >> 28) & 0x1)
#define C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK 0xEFFFFFFF
#define S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) & 0x1) << 29)
#define G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) >> 29) & 0x1)
#define C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK 0xDFFFFFFF
 
/* MC registers */
#define R_000000_MC_STATUS 0x000000
/drivers/video/drm/radeon/rv770d.h
106,13 → 106,10
#define BACKEND_MAP(x) ((x) << 16)
 
#define GB_TILING_CONFIG 0x98F0
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
 
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
#define INACTIVE_QD_PIPES_SHIFT 8
#define INACTIVE_SIMDS(x) ((x) << 16)
#define INACTIVE_SIMDS_MASK 0x00FF0000
 
177,7 → 174,6
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
211,7 → 207,6
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
 
#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define CACHE_DEPTH(x) ((x) << 1)
311,8 → 306,6
#define TCP_CNTL 0x9610
#define TCP_CHAN_STEER 0x9614
 
#define VC_ENHANCE 0x9714
 
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
#define VC_ONLY 0
360,197 → 353,6
 
#define SRBM_STATUS 0x0E50
 
/* DCE 3.2 HDMI */
#define HDMI_CONTROL 0x7400
# define HDMI_KEEPOUT_MODE (1 << 0)
# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
# define HDMI_ERROR_ACK (1 << 8)
# define HDMI_ERROR_MASK (1 << 9)
#define HDMI_STATUS 0x7404
# define HDMI_ACTIVE_AVMUTE (1 << 0)
# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
# define HDMI_VBI_PACKET_ERROR (1 << 20)
#define HDMI_AUDIO_PACKET_CONTROL 0x7408
# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
#define HDMI_ACR_PACKET_CONTROL 0x740c
# define HDMI_ACR_SEND (1 << 0)
# define HDMI_ACR_CONT (1 << 1)
# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
# define HDMI_ACR_HW 0
# define HDMI_ACR_32 1
# define HDMI_ACR_44 2
# define HDMI_ACR_48 3
# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI_ACR_AUTO_SEND (1 << 12)
#define HDMI_VBI_PACKET_CONTROL 0x7410
# define HDMI_NULL_SEND (1 << 0)
# define HDMI_GC_SEND (1 << 4)
# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
#define HDMI_INFOFRAME_CONTROL0 0x7414
# define HDMI_AVI_INFO_SEND (1 << 0)
# define HDMI_AVI_INFO_CONT (1 << 1)
# define HDMI_AUDIO_INFO_SEND (1 << 4)
# define HDMI_AUDIO_INFO_CONT (1 << 5)
# define HDMI_MPEG_INFO_SEND (1 << 8)
# define HDMI_MPEG_INFO_CONT (1 << 9)
#define HDMI_INFOFRAME_CONTROL1 0x7418
# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI_GENERIC_PACKET_CONTROL 0x741c
# define HDMI_GENERIC0_SEND (1 << 0)
# define HDMI_GENERIC0_CONT (1 << 1)
# define HDMI_GENERIC1_SEND (1 << 4)
# define HDMI_GENERIC1_CONT (1 << 5)
# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
#define HDMI_GC 0x7428
# define HDMI_GC_AVMUTE (1 << 0)
#define AFMT_AUDIO_PACKET_CONTROL2 0x742c
# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
# define AFMT_60958_CS_SOURCE (1 << 4)
# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
#define AFMT_AVI_INFO0 0x7454
# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
# define AFMT_AVI_INFO_Y_RGB 0
# define AFMT_AVI_INFO_Y_YCBCR422 1
# define AFMT_AVI_INFO_Y_YCBCR444 2
# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
#define AFMT_AVI_INFO1 0x7458
# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
#define AFMT_AVI_INFO2 0x745c
# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
#define AFMT_AVI_INFO3 0x7460
# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
#define AFMT_MPEG_INFO0 0x7464
# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
#define AFMT_MPEG_INFO1 0x7468
# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
#define AFMT_GENERIC0_HDR 0x746c
#define AFMT_GENERIC0_0 0x7470
#define AFMT_GENERIC0_1 0x7474
#define AFMT_GENERIC0_2 0x7478
#define AFMT_GENERIC0_3 0x747c
#define AFMT_GENERIC0_4 0x7480
#define AFMT_GENERIC0_5 0x7484
#define AFMT_GENERIC0_6 0x7488
#define AFMT_GENERIC1_HDR 0x748c
#define AFMT_GENERIC1_0 0x7490
#define AFMT_GENERIC1_1 0x7494
#define AFMT_GENERIC1_2 0x7498
#define AFMT_GENERIC1_3 0x749c
#define AFMT_GENERIC1_4 0x74a0
#define AFMT_GENERIC1_5 0x74a4
#define AFMT_GENERIC1_6 0x74a8
#define HDMI_ACR_32_0 0x74ac
# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_32_1 0x74b0
# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_44_0 0x74b4
# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_44_1 0x74b8
# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_48_0 0x74bc
# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_48_1 0x74c0
# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_STATUS_0 0x74c4
#define HDMI_ACR_STATUS_1 0x74c8
#define AFMT_AUDIO_INFO0 0x74cc
# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
#define AFMT_AUDIO_INFO1 0x74d0
# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
#define AFMT_60958_0 0x74d4
# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
#define AFMT_60958_1 0x74d8
# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
#define AFMT_AUDIO_CRC_CONTROL 0x74dc
# define AFMT_AUDIO_CRC_EN (1 << 0)
#define AFMT_RAMP_CONTROL0 0x74e0
# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
# define AFMT_RAMP_DATA_SIGN (1 << 31)
#define AFMT_RAMP_CONTROL1 0x74e4
# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
#define AFMT_RAMP_CONTROL2 0x74e8
# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
#define AFMT_RAMP_CONTROL3 0x74ec
# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
#define AFMT_60958_2 0x74f0
# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
#define AFMT_STATUS 0x7600
# define AFMT_AUDIO_ENABLE (1 << 4)
# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
#define AFMT_AUDIO_PACKET_CONTROL 0x7604
# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
# define AFMT_AUDIO_TEST_EN (1 << 12)
# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
# define AFMT_60958_CS_UPDATE (1 << 26)
# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
#define AFMT_VBI_PACKET_CONTROL 0x7608
# define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x760c
# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7610
/* second instance starts at 0x7800 */
#define HDMI_OFFSET0 (0x7400 - 0x7400)
#define HDMI_OFFSET1 (0x7800 - 0x7400)
 
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
/drivers/video/drm/radeon/display.h
51,10 → 51,6
void (__stdcall *move_cursor)(cursor_t *cursor, int x, int y);
void (__stdcall *restore_cursor)(int x, int y);
void (*disable_mouse)(void);
u32 mask_seqno;
u32 check_mouse;
u32 check_m_pixel;
 
};
 
extern display_t *rdisplay;
/drivers/video/drm/radeon/radeon_ttm.c
33,11 → 33,9
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "radeon_reg.h"
#include "radeon.h"
 
59,12 → 57,12
/*
* Global memory.
*/
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
 
static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
71,16 → 69,16
 
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
struct drm_global_reference *global_ref;
struct ttm_global_reference *global_ref;
int r;
 
rdev->mman.mem_global_referenced = false;
global_ref = &rdev->mman.mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->global_type = TTM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &radeon_ttm_mem_global_init;
global_ref->release = &radeon_ttm_mem_global_release;
r = drm_global_item_ref(global_ref);
r = ttm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
90,14 → 88,14
rdev->mman.bo_global_ref.mem_glob =
rdev->mman.mem_global_ref.object;
global_ref = &rdev->mman.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->global_type = TTM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
r = drm_global_item_ref(global_ref);
r = ttm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
drm_global_item_unref(&rdev->mman.mem_global_ref);
ttm_global_item_unref(&rdev->mman.mem_global_ref);
return r;
}
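
The matching teardown is outside this hunk; a hedged sketch of the symmetric release path, using the Rev 2997 drm_global_* spelling shown above (the Rev 2996 side would use ttm_global_item_unref() instead):

static void example_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		/* release in reverse order of acquisition */
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}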
 
106,11 → 104,9
}
 
 
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
}
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
 
 
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
126,8 → 122,7
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
man->func = &ttm_bo_manager_func;
man->gpu_offset = rdev->mc.gtt_start;
man->gpu_offset = rdev->mc.gtt_location;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
138,22 → 133,34
(unsigned)type);
return -EINVAL;
}
man->io_offset = rdev->mc.agp_base;
man->io_size = rdev->mc.gtt_size;
man->io_addr = NULL;
if (!rdev->ddev->agp->cant_use_aperture)
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
} else
#endif
{
man->io_offset = 0;
man->io_size = 0;
man->io_addr = NULL;
}
#endif
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->func = &ttm_bo_manager_func;
man->gpu_offset = rdev->mc.vram_start;
man->gpu_offset = rdev->mc.vram_location;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
man->io_addr = NULL;
man->io_offset = rdev->mc.aper_base;
man->io_size = rdev->mc.aper_size;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);