37,19 → 37,8 |
#include "r600d.h" |
#include "atom.h" |
#include "avivod.h" |
#include "radeon_ucode.h" |
|
#define PFP_UCODE_SIZE 576 |
#define PM4_UCODE_SIZE 1792 |
#define RLC_UCODE_SIZE 768 |
#define R700_PFP_UCODE_SIZE 848 |
#define R700_PM4_UCODE_SIZE 1360 |
#define R700_RLC_UCODE_SIZE 1024 |
#define EVERGREEN_PFP_UCODE_SIZE 1120 |
#define EVERGREEN_PM4_UCODE_SIZE 1376 |
#define EVERGREEN_RLC_UCODE_SIZE 768 |
#define CAYMAN_RLC_UCODE_SIZE 1024 |
#define ARUBA_RLC_UCODE_SIZE 1536 |
|
/* Firmware Names */ |
MODULE_FIRMWARE("radeon/R600_pfp.bin"); |
MODULE_FIRMWARE("radeon/R600_me.bin"); |
67,24 → 56,32 |
MODULE_FIRMWARE("radeon/RS780_me.bin"); |
MODULE_FIRMWARE("radeon/RV770_pfp.bin"); |
MODULE_FIRMWARE("radeon/RV770_me.bin"); |
MODULE_FIRMWARE("radeon/RV770_smc.bin"); |
MODULE_FIRMWARE("radeon/RV730_pfp.bin"); |
MODULE_FIRMWARE("radeon/RV730_me.bin"); |
MODULE_FIRMWARE("radeon/RV730_smc.bin"); |
MODULE_FIRMWARE("radeon/RV740_smc.bin"); |
MODULE_FIRMWARE("radeon/RV710_pfp.bin"); |
MODULE_FIRMWARE("radeon/RV710_me.bin"); |
MODULE_FIRMWARE("radeon/RV710_smc.bin"); |
MODULE_FIRMWARE("radeon/R600_rlc.bin"); |
MODULE_FIRMWARE("radeon/R700_rlc.bin"); |
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); |
MODULE_FIRMWARE("radeon/CEDAR_me.bin"); |
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); |
MODULE_FIRMWARE("radeon/CEDAR_smc.bin"); |
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); |
MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); |
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); |
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin"); |
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); |
MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); |
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); |
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin"); |
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); |
MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); |
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); |
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin"); |
MODULE_FIRMWARE("radeon/PALM_pfp.bin"); |
MODULE_FIRMWARE("radeon/PALM_me.bin"); |
MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); |
107,6 → 104,8 |
void r600_fini(struct radeon_device *rdev); |
void r600_irq_disable(struct radeon_device *rdev); |
static void r600_pcie_gen2_enable(struct radeon_device *rdev); |
extern int evergreen_rlc_resume(struct radeon_device *rdev); |
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); |
|
/** |
* r600_get_xclk - get the xclk |
121,6 → 120,64 |
return rdev->clock.spll.reference_freq; |
} |
|
/* Stub UVD clock setter for r600-class asics: accepts the requested
 * vclk/dclk but programs nothing, and always reports success.
 * NOTE(review): presumably UVD clock setup is either not needed or
 * handled elsewhere for these parts -- confirm against the callers.
 */
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	return 0;
}
|
void dce3_program_fmt(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
int bpc = 0; |
u32 tmp = 0; |
enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE; |
|
if (connector) { |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
bpc = radeon_get_monitor_bpc(connector); |
dither = radeon_connector->dither; |
} |
|
/* LVDS FMT is set up by atom */ |
if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) |
return; |
|
/* not needed for analog */ |
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || |
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) |
return; |
|
if (bpc == 0) |
return; |
|
switch (bpc) { |
case 6: |
if (dither == RADEON_FMT_DITHER_ENABLE) |
/* XXX sort out optimal dither settings */ |
tmp |= FMT_SPATIAL_DITHER_EN; |
else |
tmp |= FMT_TRUNCATE_EN; |
break; |
case 8: |
if (dither == RADEON_FMT_DITHER_ENABLE) |
/* XXX sort out optimal dither settings */ |
tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH); |
else |
tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH); |
break; |
case 10: |
default: |
/* not needed */ |
break; |
} |
|
WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp); |
} |
|
/* get temperature in millidegrees */ |
int rv6xx_get_temp(struct radeon_device *rdev) |
{ |
134,11 → 191,439 |
return actual_temp * 1000; |
} |
|
/* r600_pm_get_dynpm_state - select the next power state for dynamic pm.
 *
 * Decides rdev->pm.requested_power_state_index and
 * rdev->pm.requested_clock_mode_index from the planned dynpm action
 * (minimum/downclock/upclock/default) and updates the
 * dynpm_can_upclock/dynpm_can_downclock hints for the caller.
 *
 * Two strategies:
 *  - IGPs and R600: step between whole power states (states are
 *    ordered low to high, default first).
 *  - everything else: keep one power state and step between its
 *    clock modes.
 */
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		/* skip state 0 (default) as the minimum when there is a choice */
		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				/* already at the bottom */
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					/* multi-head: pick the first multi-display-capable
					 * state below the current one */
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						/* from the default (0) wrap to the highest state */
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				/* already at the top */
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					/* multi-head: pick the highest multi-display-capable
					 * state above the current one */
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					/* already at the lowest clock mode */
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					/* already at the highest clock mode */
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
|
void rs780_pm_init_profile(struct radeon_device *rdev) |
{ |
if (rdev->pm.num_power_states == 2) { |
/* default */ |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; |
/* low sh */ |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
/* mid sh */ |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; |
/* high sh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; |
/* low mh */ |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
/* mid mh */ |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; |
/* high mh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; |
} else if (rdev->pm.num_power_states == 3) { |
/* default */ |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; |
/* low sh */ |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
/* mid sh */ |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; |
/* high sh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; |
/* low mh */ |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
/* mid mh */ |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; |
/* high mh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; |
} else { |
/* default */ |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; |
/* low sh */ |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
/* mid sh */ |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; |
/* high sh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; |
/* low mh */ |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
/* mid mh */ |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; |
/* high mh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; |
} |
} |
|
void r600_pm_init_profile(struct radeon_device *rdev) |
{ |
int idx; |
|
if (rdev->family == CHIP_R600) { |
/* XXX */ |
/* default */ |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; |
/* low sh */ |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
/* mid sh */ |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; |
/* high sh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; |
/* low mh */ |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
/* mid mh */ |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; |
/* high mh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; |
} else { |
if (rdev->pm.num_power_states < 4) { |
/* default */ |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; |
/* low sh */ |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
/* mid sh */ |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; |
/* high sh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; |
/* low mh */ |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
/* low mh */ |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; |
/* high mh */ |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; |
} else { |
/* default */ |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; |
/* low sh */ |
if (rdev->flags & RADEON_IS_MOBILITY) |
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); |
else |
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
/* mid sh */ |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; |
/* high sh */ |
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; |
/* low mh */ |
if (rdev->flags & RADEON_IS_MOBILITY) |
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); |
else |
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
/* mid mh */ |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; |
/* high mh */ |
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; |
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; |
} |
} |
} |
|
void r600_pm_misc(struct radeon_device *rdev) |
{ |
int req_ps_idx = rdev->pm.requested_power_state_index; |
int req_cm_idx = rdev->pm.requested_clock_mode_index; |
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; |
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; |
|
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { |
/* 0xff01 is a flag rather then an actual voltage */ |
if (voltage->voltage == 0xff01) |
return; |
if (voltage->voltage != rdev->pm.current_vddc) { |
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
rdev->pm.current_vddc = voltage->voltage; |
DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); |
} |
} |
} |
|
bool r600_gui_idle(struct radeon_device *rdev) |
{ |
if (RREG32(GRBM_STATUS) & GUI_ACTIVE) |
483,7 → 968,6 |
r = radeon_gart_table_vram_pin(rdev); |
if (r) |
return r; |
radeon_gart_restore(rdev); |
|
/* Setup L2 cache */ |
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | |
619,20 → 1103,27 |
|
/* Read an indexed RS780 MC register.  The MC_INDEX/MC_DATA pair is a
 * shared resource, so the access is serialized with mc_idx_lock.
 */
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long irq_flags;
	uint32_t data;

	spin_lock_irqsave(&rdev->mc_idx_lock, irq_flags);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	data = RREG32(R_0028FC_MC_DATA);
	/* restore MC_INDEX */
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, irq_flags);

	return data;
}
|
/* Write an indexed RS780 MC register under mc_idx_lock (the
 * MC_INDEX/MC_DATA pair is shared with rs780_mc_rreg()).
 */
void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, irq_flags);
	WREG32(R_0028F8_MC_INDEX,
	       S_0028F8_MC_IND_ADDR(reg) | S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	/* restore MC_INDEX */
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, irq_flags);
}
|
static void r600_mc_program(struct radeon_device *rdev) |
847,7 → 1338,7 |
if (rdev->vram_scratch.robj == NULL) { |
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, |
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
NULL, &rdev->vram_scratch.robj); |
0, NULL, &rdev->vram_scratch.robj); |
if (r) { |
return r; |
} |
948,7 → 1439,7 |
return true; |
} |
|
static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev) |
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev) |
{ |
u32 reset_mask = 0; |
u32 tmp; |
1153,6 → 1644,67 |
r600_print_gpu_status_regs(rdev); |
} |
|
/* r600_gpu_pci_config_reset - reset the asic via pci config space.
 *
 * Heavier-weight fallback reset: halts the CP, RLC and DMA engines,
 * switches the clocks to bypass (rv770+), disables bus mastering and
 * mem access, then triggers a pci config reset and waits for the asic
 * to come back (CONFIG_MEMSIZE reads something other than all-ones).
 */
static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp, i;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* disable dpm? */

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	/* Disable DMA */
	tmp = RREG32(DMA_RB_CNTL);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, tmp);

	/* give the engines time to quiesce */
	mdelay(50);

	/* set mclk/sclk to bypass */
	if (rdev->family >= CHIP_RV770)
		rv770_set_clk_bypass_mode(rdev);
	/* disable BM */
	pci_clear_master(rdev->pdev);
	/* disable mem access */
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	/* BIF reset workaround. Not sure if this is needed on 6xx */
	tmp = RREG32(BUS_CNTL);
	tmp |= VGA_COHE_SPEC_TIMER_DIS;
	WREG32(BUS_CNTL, tmp);

	/* readback; value unused -- presumably flushes prior writes */
	tmp = RREG32(BIF_SCRATCH0);

	/* reset */
	radeon_pci_config_reset(rdev);
	mdelay(1);

	/* BIF reset workaround. Not sure if this is needed on 6xx */
	tmp = SOFT_RESET_BIF;
	WREG32(SRBM_SOFT_RESET, tmp);
	mdelay(1);
	WREG32(SRBM_SOFT_RESET, 0);

	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}
|
int r600_asic_reset(struct radeon_device *rdev) |
{ |
u32 reset_mask; |
1162,10 → 1714,17 |
if (reset_mask) |
r600_set_bios_scratch_engine_hung(rdev, true); |
|
/* try soft reset */ |
r600_gpu_soft_reset(rdev, reset_mask); |
|
reset_mask = r600_gpu_check_soft_reset(rdev); |
|
/* try pci config reset */ |
if (reset_mask && radeon_hard_reset) |
r600_gpu_pci_config_reset(rdev); |
|
reset_mask = r600_gpu_check_soft_reset(rdev); |
|
if (!reset_mask) |
r600_set_bios_scratch_engine_hung(rdev, false); |
|
1188,36 → 1747,12 |
if (!(reset_mask & (RADEON_RESET_GFX | |
RADEON_RESET_COMPUTE | |
RADEON_RESET_CP))) { |
radeon_ring_lockup_update(ring); |
radeon_ring_lockup_update(rdev, ring); |
return false; |
} |
/* force CP activities */ |
radeon_ring_force_activity(rdev, ring); |
return radeon_ring_test_lockup(rdev, ring); |
} |
|
/** |
* r600_dma_is_lockup - Check if the DMA engine is locked up |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Check if the async DMA engine is locked up. |
* Returns true if the engine appears to be locked up, false if not. |
*/ |
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
{ |
u32 reset_mask = r600_gpu_check_soft_reset(rdev); |
|
if (!(reset_mask & RADEON_RESET_DMA)) { |
radeon_ring_lockup_update(ring); |
return false; |
} |
/* force ring activities */ |
radeon_ring_force_activity(rdev, ring); |
return radeon_ring_test_lockup(rdev, ring); |
} |
|
u32 r6xx_remap_render_backend(struct radeon_device *rdev, |
u32 tiling_pipe_num, |
u32 max_rb_num, |
1277,7 → 1812,6 |
{ |
u32 tiling_config; |
u32 ramcfg; |
u32 cc_rb_backend_disable; |
u32 cc_gc_shader_pipe_config; |
u32 tmp; |
int i, j; |
1404,26 → 1938,20 |
} |
tiling_config |= BANK_SWAPS(1); |
|
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; |
tmp = R6XX_MAX_BACKENDS - |
r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK); |
if (tmp < rdev->config.r600.max_backends) { |
rdev->config.r600.max_backends = tmp; |
} |
|
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; |
tmp = R6XX_MAX_PIPES - |
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK); |
if (tmp < rdev->config.r600.max_pipes) { |
rdev->config.r600.max_pipes = tmp; |
} |
tmp = R6XX_MAX_SIMDS - |
tmp = rdev->config.r600.max_simds - |
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); |
if (tmp < rdev->config.r600.max_simds) { |
rdev->config.r600.max_simds = tmp; |
} |
rdev->config.r600.active_simds = tmp; |
|
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; |
tmp = 0; |
for (i = 0; i < rdev->config.r600.max_backends; i++) |
tmp |= (1 << i); |
/* if all the backends are disabled, fix it up here */ |
if ((disabled_rb_mask & tmp) == tmp) { |
for (i = 0; i < rdev->config.r600.max_backends; i++) |
disabled_rb_mask &= ~(1 << i); |
} |
tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; |
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, |
R6XX_MAX_BACKENDS, disabled_rb_mask); |
1688,20 → 2216,27 |
*/ |
/* Read an indexed PCIE port register; the PCIE_PORT_INDEX/DATA pair
 * is serialized with pciep_idx_lock.
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long irq_flags;
	u32 data;

	spin_lock_irqsave(&rdev->pciep_idx_lock, irq_flags);
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	/* readback; value discarded -- presumably flushes the index write */
	(void)RREG32(PCIE_PORT_INDEX);
	data = RREG32(PCIE_PORT_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, irq_flags);

	return data;
}
|
/* Write an indexed PCIE port register under pciep_idx_lock. */
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&rdev->pciep_idx_lock, irq_flags);
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	/* readbacks; values discarded -- presumably flush the writes */
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, irq_flags);
}
|
/* |
1709,6 → 2244,7 |
*/ |
void r600_cp_stop(struct radeon_device *rdev) |
{ |
if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
WREG32(SCRATCH_UMSK, 0); |
1717,22 → 2253,15 |
|
int r600_init_microcode(struct radeon_device *rdev) |
{ |
struct platform_device *pdev; |
const char *chip_name; |
const char *rlc_chip_name; |
size_t pfp_req_size, me_req_size, rlc_req_size; |
const char *smc_chip_name = "RV770"; |
size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0; |
char fw_name[30]; |
int err; |
|
DRM_DEBUG("\n"); |
|
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); |
err = IS_ERR(pdev); |
if (err) { |
printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); |
return -EINVAL; |
} |
|
switch (rdev->family) { |
case CHIP_R600: |
chip_name = "R600"; |
1766,32 → 2295,51 |
case CHIP_RV770: |
chip_name = "RV770"; |
rlc_chip_name = "R700"; |
smc_chip_name = "RV770"; |
smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4); |
break; |
case CHIP_RV730: |
case CHIP_RV740: |
chip_name = "RV730"; |
rlc_chip_name = "R700"; |
smc_chip_name = "RV730"; |
smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4); |
break; |
case CHIP_RV710: |
chip_name = "RV710"; |
rlc_chip_name = "R700"; |
smc_chip_name = "RV710"; |
smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4); |
break; |
case CHIP_RV740: |
chip_name = "RV730"; |
rlc_chip_name = "R700"; |
smc_chip_name = "RV740"; |
smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4); |
break; |
case CHIP_CEDAR: |
chip_name = "CEDAR"; |
rlc_chip_name = "CEDAR"; |
smc_chip_name = "CEDAR"; |
smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4); |
break; |
case CHIP_REDWOOD: |
chip_name = "REDWOOD"; |
rlc_chip_name = "REDWOOD"; |
smc_chip_name = "REDWOOD"; |
smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4); |
break; |
case CHIP_JUNIPER: |
chip_name = "JUNIPER"; |
rlc_chip_name = "JUNIPER"; |
smc_chip_name = "JUNIPER"; |
smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4); |
break; |
case CHIP_CYPRESS: |
case CHIP_HEMLOCK: |
chip_name = "CYPRESS"; |
rlc_chip_name = "CYPRESS"; |
smc_chip_name = "CYPRESS"; |
smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4); |
break; |
case CHIP_PALM: |
chip_name = "PALM"; |
1817,15 → 2365,15 |
me_req_size = R700_PM4_UCODE_SIZE * 4; |
rlc_req_size = R700_RLC_UCODE_SIZE * 4; |
} else { |
pfp_req_size = PFP_UCODE_SIZE * 4; |
me_req_size = PM4_UCODE_SIZE * 12; |
rlc_req_size = RLC_UCODE_SIZE * 4; |
pfp_req_size = R600_PFP_UCODE_SIZE * 4; |
me_req_size = R600_PM4_UCODE_SIZE * 12; |
rlc_req_size = R600_RLC_UCODE_SIZE * 4; |
} |
|
DRM_INFO("Loading %s Microcode\n", chip_name); |
|
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); |
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); |
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); |
if (err) |
goto out; |
if (rdev->pfp_fw->size != pfp_req_size) { |
1837,7 → 2385,7 |
} |
|
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); |
err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); |
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); |
if (err) |
goto out; |
if (rdev->me_fw->size != me_req_size) { |
1848,7 → 2396,7 |
} |
|
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); |
err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); |
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev); |
if (err) |
goto out; |
if (rdev->rlc_fw->size != rlc_req_size) { |
1858,9 → 2406,25 |
err = -EINVAL; |
} |
|
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { |
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); |
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
if (err) { |
printk(KERN_ERR |
"smc: error loading firmware \"%s\"\n", |
fw_name); |
release_firmware(rdev->smc_fw); |
rdev->smc_fw = NULL; |
err = 0; |
} else if (rdev->smc_fw->size != smc_req_size) { |
printk(KERN_ERR |
"smc: Bogus length %zu in firmware \"%s\"\n", |
rdev->smc_fw->size, fw_name); |
err = -EINVAL; |
} |
} |
|
out: |
platform_device_unregister(pdev); |
|
if (err) { |
if (err != -EINVAL) |
printk(KERN_ERR |
1872,10 → 2436,42 |
rdev->me_fw = NULL; |
release_firmware(rdev->rlc_fw); |
rdev->rlc_fw = NULL; |
release_firmware(rdev->smc_fw); |
rdev->smc_fw = NULL; |
} |
return err; |
} |
|
u32 r600_gfx_get_rptr(struct radeon_device *rdev, |
struct radeon_ring *ring) |
{ |
u32 rptr; |
|
if (rdev->wb.enabled) |
rptr = rdev->wb.wb[ring->rptr_offs/4]; |
else |
rptr = RREG32(R600_CP_RB_RPTR); |
|
return rptr; |
} |
|
u32 r600_gfx_get_wptr(struct radeon_device *rdev, |
struct radeon_ring *ring) |
{ |
u32 wptr; |
|
wptr = RREG32(R600_CP_RB_WPTR); |
|
return wptr; |
} |
|
/**
 * r600_gfx_set_wptr - commit the ring's write pointer to the hardware
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Writes ring->wptr to the CP ring-buffer write pointer register and
 * reads it back to post the write before returning.
 */
void r600_gfx_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(R600_CP_RB_WPTR, ring->wptr);
	/* read back to flush the posted write to the GPU */
	(void)RREG32(R600_CP_RB_WPTR);
}
|
static int r600_cp_load_microcode(struct radeon_device *rdev) |
{ |
const __be32 *fw_data; |
1902,13 → 2498,13 |
|
fw_data = (const __be32 *)rdev->me_fw->data; |
WREG32(CP_ME_RAM_WADDR, 0); |
for (i = 0; i < PM4_UCODE_SIZE * 3; i++) |
for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++) |
WREG32(CP_ME_RAM_DATA, |
be32_to_cpup(fw_data++)); |
|
fw_data = (const __be32 *)rdev->pfp_fw->data; |
WREG32(CP_PFP_UCODE_ADDR, 0); |
for (i = 0; i < PFP_UCODE_SIZE; i++) |
for (i = 0; i < R600_PFP_UCODE_SIZE; i++) |
WREG32(CP_PFP_UCODE_DATA, |
be32_to_cpup(fw_data++)); |
|
1941,7 → 2537,7 |
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 0); |
radeon_ring_unlock_commit(rdev, ring); |
radeon_ring_unlock_commit(rdev, ring, false); |
|
cp_me = 0xff; |
WREG32(R_0086D8_CP_ME_CNTL, cp_me); |
1962,8 → 2558,8 |
WREG32(GRBM_SOFT_RESET, 0); |
|
/* Set ring buffer size */ |
rb_bufsz = drm_order(ring->ring_size / 8); |
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
rb_bufsz = order_base_2(ring->ring_size / 8); |
tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
#ifdef __BIG_ENDIAN |
tmp |= BUF_SWAP_32BIT; |
#endif |
1998,8 → 2594,6 |
WREG32(CP_RB_BASE, ring->gpu_addr >> 8); |
WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); |
|
ring->rptr = RREG32(CP_RB_RPTR); |
|
r600_cp_start(rdev); |
ring->ready = true; |
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); |
2007,6 → 2601,10 |
ring->ready = false; |
return r; |
} |
|
if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
|
return 0; |
} |
|
2016,7 → 2614,7 |
int r; |
|
/* Align ring size */ |
rb_bufsz = drm_order(ring_size / 8); |
rb_bufsz = order_base_2(ring_size / 8); |
ring_size = (1 << (rb_bufsz + 1)) * 4; |
ring->ring_size = ring_size; |
ring->align_mask = 16 - 1; |
2039,327 → 2637,6 |
} |
|
/* |
* DMA |
* Starting with R600, the GPU has an asynchronous |
* DMA engine. The programming model is very similar |
* to the 3D engine (ring buffer, IBs, etc.), but the |
* DMA controller has it's own packet format that is |
* different form the PM4 format used by the 3D engine. |
* It supports copying data, writing embedded data, |
* solid fills, and a number of other things. It also |
* has support for tiling/detiling of buffers. |
*/ |
/** |
* r600_dma_stop - stop the async dma engine |
* |
* @rdev: radeon_device pointer |
* |
* Stop the async dma engine (r6xx-evergreen). |
*/ |
void r600_dma_stop(struct radeon_device *rdev) |
{ |
u32 rb_cntl = RREG32(DMA_RB_CNTL); |
|
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
|
rb_cntl &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL, rb_cntl); |
|
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; |
} |
|
/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Soft-resets the DMA block, programs the ring buffer, read-pointer
 * writeback and IB support, then enables the ring and runs a ring test.
 * (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	int r;

	/* Reset dma */
	if (rdev->family >= CHIP_RV770)
		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
	else
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
	/* post the reset write, let it settle, then release the reset */
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	/* disable the semaphore timeout timers */
	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
	rb_bufsz = drm_order(ring->ring_size / 4);
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	/* the writeback-enable bit only reaches the HW in the final
	 * DMA_RB_CNTL write below, together with DMA_RB_ENABLE */
	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
	ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
	WREG32(DMA_IB_CNTL, ib_cntl);

	/* mask the context-empty interrupt */
	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	/* ring pointers are in dwords; the registers hold byte offsets */
	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	ring->rptr = RREG32(DMA_RB_RPTR) >> 2;

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	/* DMA can move pages again: expose all of VRAM to TTM */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
|
/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 * The engine must be halted before the ring memory is released.
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}
|
/* |
* UVD |
*/ |
/**
 * r600_uvd_rbc_start - bring up the UVD ring buffer controller
 *
 * @rdev: radeon_device pointer
 *
 * Programs the UVD RBC with the ring buffer and writeback rptr address,
 * runs a ring test, and emits the initial semaphore timeout setup
 * commands. Returns 0 on success, negative error code on failure.
 */
int r600_uvd_rbc_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
	uint64_t rptr_addr;
	uint32_t rb_bufsz, tmp;
	int r;

	rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;

	/* the RBC only gets one upper-32-bits segment register, so the
	 * rptr writeback and the ring must share the same 4GB window */
	if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
		DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
		return -EINVAL;
	}

	/* force RBC into idle state */
	WREG32(UVD_RBC_RB_CNTL, 0x11010101);

	/* Set the write pointer delay */
	WREG32(UVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);

	/* program the 4GB memory segment for rptr and ring buffer */
	WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
				   (0x7 << 16) | (0x1 << 31));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(UVD_RBC_RB_RPTR, 0x0);

	ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
	WREG32(UVD_RBC_RB_WPTR, ring->wptr);

	/* set the ring address */
	WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);

	/* Set ring buffer size */
	rb_bufsz = drm_order(ring->ring_size);
	rb_bufsz = (0x1 << 8) | rb_bufsz;
	WREG32(UVD_RBC_RB_CNTL, rb_bufsz);

	ring->ready = true;
	r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	r = radeon_ring_lock(rdev, ring, 10);
	if (r) {
		DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
		return r;
	}

	/* program the semaphore timeout registers via ring commands */
	tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	radeon_ring_write(ring, tmp);
	radeon_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
	radeon_ring_write(ring, 0x8);

	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
	radeon_ring_write(ring, 3);

	radeon_ring_unlock_commit(rdev, ring);

	return 0;
}
|
/**
 * r600_uvd_rbc_stop - stop the UVD ring buffer controller
 *
 * @rdev: radeon_device pointer
 *
 * Forces the RBC back into its idle state and marks the UVD ring
 * as not ready.
 */
void r600_uvd_rbc_stop(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];

	/* force RBC into idle state */
	WREG32(UVD_RBC_RB_CNTL, 0x11010101);
	ring->ready = false;
}
|
/**
 * r600_uvd_init - boot the UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Resets the UVD subblocks, programs the LMI/MPC, boots the VCPU and
 * polls for it to come alive (with up to 10 reset retries), then starts
 * the ring buffer controller. UVD clocks are raised for the boot and
 * lowered again before returning.
 * Returns 0 on success, negative value on failure.
 */
int r600_uvd_init(struct radeon_device *rdev)
{
	int i, j, r;
	/* disable byte swapping */
	u32 lmi_swap_cntl = 0;
	u32 mp_swap_cntl = 0;

	/* raise clocks while booting up the VCPU */
	radeon_set_uvd_clocks(rdev, 53300, 40000);

	/* disable clock gating */
	WREG32(UVD_CGC_GATE, 0);

	/* disable interrupt */
	WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
	       LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
	       CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(UVD_MPC_SET_MUXA1, 0x0);
	WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(UVD_MPC_SET_MUXB1, 0x0);
	WREG32(UVD_MPC_SET_ALU, 0);
	WREG32(UVD_MPC_SET_MUX, 0x88);

	/* Stall UMC */
	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));

	/* take all subblocks out of reset, except VCPU */
	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(UVD_VCPU_CNTL,  1 << 9);

	/* enable UMC */
	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(UVD_SOFT_RESET, 0);
	mdelay(10);

	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));

	/* up to 10 reset attempts; within each, poll up to 1s for the
	 * VCPU to report alive (bit 1 of UVD_STATUS) */
	for (i = 0; i < 10; ++i) {
		uint32_t status;
		for (j = 0; j < 100; ++j) {
			status = RREG32(UVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
		mdelay(10);
		WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		radeon_set_uvd_clocks(rdev, 0, 0);
		return r;
	}

	/* enable interrupt */
	WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));

	r = r600_uvd_rbc_start(rdev);
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	/* lower clocks again */
	radeon_set_uvd_clocks(rdev, 0, 0);

	return r;
}
|
/* |
* GPU scratch registers helpers function. |
*/ |
void r600_scratch_init(struct radeon_device *rdev) |
2396,7 → 2673,7 |
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); |
radeon_ring_write(ring, 0xDEADBEEF); |
radeon_ring_unlock_commit(rdev, ring); |
radeon_ring_unlock_commit(rdev, ring, false); |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = RREG32(scratch); |
if (tmp == 0xDEADBEEF) |
2414,94 → 2691,6 |
return r; |
} |
|
/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a known
 * value to the vram scratch location. (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	/* seed the scratch location with a sentinel the DMA must overwrite */
	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	/* single DMA write packet: store 0xDEADBEEF at the scratch address */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	/* poll for the value to land, up to the usec timeout */
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
|
/**
 * r600_uvd_ring_test - simple UVD ring test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Writes a sentinel to UVD_CONTEXT_ID, then asks the UVD engine to
 * overwrite it via a ring command and polls for the new value.
 * Returns 0 for success, error for failure.
 */
int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);
	/* poll for the engine to write the new value back */
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(UVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
|
/* |
* CP fences/semaphores |
*/ |
2510,14 → 2699,17 |
struct radeon_fence *fence) |
{ |
struct radeon_ring *ring = &rdev->ring[fence->ring]; |
u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA | |
PACKET3_SH_ACTION_ENA; |
|
if (rdev->family >= CHIP_RV770) |
cp_coher_cntl |= PACKET3_FULL_CACHE_ENA; |
|
if (rdev->wb.use_event) { |
u64 addr = rdev->fence_drv[fence->ring].gpu_addr; |
/* flush read cache over gart */ |
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | |
PACKET3_VC_ACTION_ENA | |
PACKET3_SH_ACTION_ENA); |
radeon_ring_write(ring, cp_coher_cntl); |
radeon_ring_write(ring, 0xFFFFFFFF); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 10); /* poll interval */ |
2524,7 → 2716,7 |
/* EVENT_WRITE_EOP - flush caches, send int */ |
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); |
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); |
radeon_ring_write(ring, addr & 0xffffffff); |
radeon_ring_write(ring, lower_32_bits(addr)); |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); |
radeon_ring_write(ring, fence->seq); |
radeon_ring_write(ring, 0); |
2531,9 → 2723,7 |
} else { |
/* flush read cache over gart */ |
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | |
PACKET3_VC_ACTION_ENA | |
PACKET3_SH_ACTION_ENA); |
radeon_ring_write(ring, cp_coher_cntl); |
radeon_ring_write(ring, 0xFFFFFFFF); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 10); /* poll interval */ |
2553,137 → 2743,44 |
} |
} |
|
void r600_uvd_fence_emit(struct radeon_device *rdev, |
struct radeon_fence *fence) |
{ |
struct radeon_ring *ring = &rdev->ring[fence->ring]; |
uint32_t addr = rdev->fence_drv[fence->ring].gpu_addr; |
|
radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); |
radeon_ring_write(ring, fence->seq); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); |
radeon_ring_write(ring, addr & 0xffffffff); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); |
radeon_ring_write(ring, upper_32_bits(addr) & 0xff); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); |
radeon_ring_write(ring, 0); |
|
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); |
radeon_ring_write(ring, 2); |
return; |
} |
|
/**
 * r600_semaphore_ring_emit - emit a semaphore command on the CP ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring to emit the semaphore command on
 * @semaphore: radeon semaphore object
 * @emit_wait: true to emit a wait, false to emit a signal
 *
 * Emits a MEM_SEMAPHORE packet selecting wait or signal mode; parts
 * older than Cayman additionally need the WAIT_ON_SIGNAL flag set.
 */
void r600_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	if (rdev->family < CHIP_CAYMAN)
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
|
/* |
* DMA fences/semaphores |
*/ |
|
/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
	/* fence address must be dword-aligned; low 2 bits are dropped */
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, lower_32_bits(fence->seq));
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}
|
/** |
* r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* @ring: radeon ring buffer object |
* @semaphore: radeon semaphore object |
* @emit_wait: wait or signal semaphore |
 * @emit_wait: Is this a semaphore wait?
* |
* Add a DMA semaphore packet to the ring wait on or signal |
* other rings (r6xx-SI). |
* Emits a semaphore signal/wait packet to the CP ring and prevents the PFP |
* from running ahead of semaphore waits. |
*/ |
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, |
bool r600_semaphore_ring_emit(struct radeon_device *rdev, |
struct radeon_ring *ring, |
struct radeon_semaphore *semaphore, |
bool emit_wait) |
{ |
u64 addr = semaphore->gpu_addr; |
u32 s = emit_wait ? 0 : 1; |
|
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); |
radeon_ring_write(ring, addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(addr) & 0xff); |
} |
|
void r600_uvd_semaphore_emit(struct radeon_device *rdev, |
struct radeon_ring *ring, |
struct radeon_semaphore *semaphore, |
bool emit_wait) |
{ |
uint64_t addr = semaphore->gpu_addr; |
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; |
|
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); |
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); |
if (rdev->family < CHIP_CAYMAN) |
sel |= PACKET3_SEM_WAIT_ON_SIGNAL; |
|
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); |
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); |
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); |
radeon_ring_write(ring, lower_32_bits(addr)); |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); |
|
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); |
radeon_ring_write(ring, emit_wait ? 1 : 0); |
/* PFP_SYNC_ME packet only exists on 7xx+ */ |
if (emit_wait && (rdev->family >= CHIP_RV770)) { |
/* Prevent the PFP from running ahead of the semaphore wait */ |
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
radeon_ring_write(ring, 0x0); |
} |
|
int r600_copy_blit(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_sa_bo *vb = NULL; |
int r; |
|
r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem); |
if (r) { |
return r; |
return true; |
} |
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb); |
r600_blit_done_copy(rdev, fence, vb, sem); |
return 0; |
} |
|
/** |
* r600_copy_dma - copy pages using the DMA engine |
* r600_copy_cpdma - copy pages using the CP DMA engine |
* |
* @rdev: radeon_device pointer |
* @src_offset: src GPU address |
2691,19 → 2788,19 |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* |
* Copy GPU paging using the DMA engine (r6xx). |
* Copy GPU paging using the CP DMA engine (r6xx+). |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int r600_copy_dma(struct radeon_device *rdev, |
int r600_copy_cpdma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
{ |
struct radeon_semaphore *sem = NULL; |
int ring_index = rdev->asic->copy.dma_ring_index; |
int ring_index = rdev->asic->copy.blit_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_dw, cur_size_in_dw; |
u32 size_in_bytes, cur_size_in_bytes, tmp; |
int i, num_loops; |
int r = 0; |
|
2713,9 → 2810,9 |
return r; |
} |
|
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; |
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); |
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); |
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); |
r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
2722,35 → 2819,41 |
return r; |
} |
|
if (radeon_fence_need_sync(*fence, ring->idx)) { |
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, |
ring->idx); |
radeon_fence_note_sync(*fence, ring->idx); |
} else { |
radeon_semaphore_free(rdev, &sem, NULL); |
} |
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
|
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
radeon_ring_write(ring, WAIT_3D_IDLE_bit); |
for (i = 0; i < num_loops; i++) { |
cur_size_in_dw = size_in_dw; |
if (cur_size_in_dw > 0xFFFE) |
cur_size_in_dw = 0xFFFE; |
size_in_dw -= cur_size_in_dw; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); |
radeon_ring_write(ring, dst_offset & 0xfffffffc); |
radeon_ring_write(ring, src_offset & 0xfffffffc); |
radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) | |
(upper_32_bits(src_offset) & 0xff))); |
src_offset += cur_size_in_dw * 4; |
dst_offset += cur_size_in_dw * 4; |
cur_size_in_bytes = size_in_bytes; |
if (cur_size_in_bytes > 0x1fffff) |
cur_size_in_bytes = 0x1fffff; |
size_in_bytes -= cur_size_in_bytes; |
tmp = upper_32_bits(src_offset) & 0xff; |
if (size_in_bytes == 0) |
tmp |= PACKET3_CP_DMA_CP_SYNC; |
radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4)); |
radeon_ring_write(ring, lower_32_bits(src_offset)); |
radeon_ring_write(ring, tmp); |
radeon_ring_write(ring, lower_32_bits(dst_offset)); |
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); |
radeon_ring_write(ring, cur_size_in_bytes); |
src_offset += cur_size_in_bytes; |
dst_offset += cur_size_in_bytes; |
} |
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); |
|
r = radeon_fence_emit(rdev, fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
} |
|
radeon_ring_unlock_commit(rdev, ring); |
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
|
return r; |
2777,19 → 2880,13 |
/* enable pcie gen2 link */ |
r600_pcie_gen2_enable(rdev); |
|
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
r = r600_init_microcode(rdev); |
if (r) { |
DRM_ERROR("Failed to load firmware!\n"); |
return r; |
} |
} |
|
/* scratch needs to be initialized before MC */ |
r = r600_vram_scratch_init(rdev); |
if (r) |
return r; |
|
r600_mc_program(rdev); |
|
if (rdev->flags & RADEON_IS_AGP) { |
r600_agp_enable(rdev); |
} else { |
2798,12 → 2895,6 |
return r; |
} |
r600_gpu_init(rdev); |
r = r600_blit_init(rdev); |
if (r) { |
// r600_blit_fini(rdev); |
rdev->asic->copy.copy = NULL; |
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); |
} |
|
/* allocate wb buffer */ |
r = radeon_wb_init(rdev); |
2816,12 → 2907,6 |
return r; |
} |
|
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
|
/* Enable IRQ */ |
if (!rdev->irq.installed) { |
r = radeon_irq_kms_init(rdev); |
2839,18 → 2924,10 |
|
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
R600_CP_RB_RPTR, R600_CP_RB_WPTR, |
0, 0xfffff, RADEON_CP_PACKET2); |
RADEON_CP_PACKET2); |
if (r) |
return r; |
|
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
DMA_RB_RPTR, DMA_RB_WPTR, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
if (r) |
return r; |
|
r = r600_cp_load_microcode(rdev); |
if (r) |
return r; |
2858,15 → 2935,13 |
if (r) |
return r; |
|
r = r600_dma_resume(rdev); |
if (r) |
return r; |
|
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
return r; |
} |
|
|
return 0; |
} |
|
2946,12 → 3021,20 |
if (r) |
return r; |
|
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
r = r600_init_microcode(rdev); |
if (r) { |
DRM_ERROR("Failed to load firmware!\n"); |
return r; |
} |
} |
|
/* Initialize power management */ |
radeon_pm_init(rdev); |
|
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
|
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); |
|
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
|
3003,16 → 3086,6 |
radeon_ring_write(ring, ib->length_dw); |
} |
|
void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
|
radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0)); |
radeon_ring_write(ring, ib->gpu_addr); |
radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0)); |
radeon_ring_write(ring, ib->length_dw); |
} |
|
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
{ |
struct radeon_ib ib; |
3036,7 → 3109,7 |
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
ib.ptr[2] = 0xDEADBEEF; |
ib.length_dw = 3; |
r = radeon_ib_schedule(rdev, &ib, NULL); |
r = radeon_ib_schedule(rdev, &ib, NULL, false); |
if (r) { |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
goto free_ib; |
3066,139 → 3139,6 |
return r; |
} |
|
/** |
* r600_dma_ib_test - test an IB on the DMA engine |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Test a simple IB in the DMA ring (r6xx-SI). |
* Returns 0 on success, error on failure. |
*/ |
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
{ |
struct radeon_ib ib; |
unsigned i; |
int r; |
void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
u32 tmp = 0; |
|
if (!ptr) { |
DRM_ERROR("invalid vram scratch pointer\n"); |
return -EINVAL; |
} |
|
tmp = 0xCAFEDEAD; |
writel(tmp, ptr); |
|
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); |
if (r) { |
DRM_ERROR("radeon: failed to get ib (%d).\n", r); |
return r; |
} |
|
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); |
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; |
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; |
ib.ptr[3] = 0xDEADBEEF; |
ib.length_dw = 4; |
|
r = radeon_ib_schedule(rdev, &ib, NULL); |
if (r) { |
radeon_ib_free(rdev, &ib); |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
return r; |
} |
r = radeon_fence_wait(ib.fence, false); |
if (r) { |
DRM_ERROR("radeon: fence wait failed (%d).\n", r); |
return r; |
} |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = readl(ptr); |
if (tmp == 0xDEADBEEF) |
break; |
DRM_UDELAY(1); |
} |
if (i < rdev->usec_timeout) { |
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); |
} else { |
DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); |
r = -EINVAL; |
} |
radeon_ib_free(rdev, &ib); |
return r; |
} |
|
int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
{ |
struct radeon_fence *fence = NULL; |
int r; |
|
r = radeon_set_uvd_clocks(rdev, 53300, 40000); |
if (r) { |
DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); |
return r; |
} |
|
// r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL); |
if (r) { |
DRM_ERROR("radeon: failed to get create msg (%d).\n", r); |
goto error; |
} |
|
// r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence); |
if (r) { |
DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r); |
goto error; |
} |
|
r = radeon_fence_wait(fence, false); |
if (r) { |
DRM_ERROR("radeon: fence wait failed (%d).\n", r); |
goto error; |
} |
DRM_INFO("ib test on ring %d succeeded\n", ring->idx); |
error: |
radeon_fence_unref(&fence); |
radeon_set_uvd_clocks(rdev, 0, 0); |
return r; |
} |
|
/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		/* With writeback enabled, emit a DMA write recording the ring
		 * position just past the indirect-buffer packet emitted below,
		 * so the host can track the rptr without MMIO reads.  The
		 * computed value mirrors the emission sequence: skip this
		 * 4 DW write packet, advance through the NOP padding to the
		 * next (8n + 5) slot, then skip the 3 DW IB packet itself.
		 */
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.  The 3 DW IB packet therefore starts at a
	 * (wptr & 7) == 5 slot.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
	/* IB base must be 32-byte aligned; low bits carry no address info */
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));

}
|
/* |
* Interrupts |
* |
3215,7 → 3155,7 |
u32 rb_bufsz; |
|
/* Align ring size */ |
rb_bufsz = drm_order(ring_size / 4); |
rb_bufsz = order_base_2(ring_size / 4); |
ring_size = (1 << rb_bufsz) * 4; |
rdev->ih.ring_size = ring_size; |
rdev->ih.ptr_mask = rdev->ih.ring_size - 1; |
3230,7 → 3170,7 |
if (rdev->ih.ring_obj == NULL) { |
r = radeon_bo_create(rdev, rdev->ih.ring_size, |
PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_GTT, |
RADEON_GEM_DOMAIN_GTT, 0, |
NULL, &rdev->ih.ring_obj); |
if (r) { |
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); |
3295,7 → 3235,7 |
WREG32(RLC_CNTL, RLC_ENABLE); |
} |
|
static int r600_rlc_init(struct radeon_device *rdev) |
static int r600_rlc_resume(struct radeon_device *rdev) |
{ |
u32 i; |
const __be32 *fw_data; |
3307,45 → 3247,22 |
|
WREG32(RLC_HB_CNTL, 0); |
|
if (rdev->family == CHIP_ARUBA) { |
WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); |
WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); |
} |
if (rdev->family <= CHIP_CAYMAN) { |
WREG32(RLC_HB_BASE, 0); |
WREG32(RLC_HB_RPTR, 0); |
WREG32(RLC_HB_WPTR, 0); |
} |
if (rdev->family <= CHIP_CAICOS) { |
WREG32(RLC_HB_WPTR_LSB_ADDR, 0); |
WREG32(RLC_HB_WPTR_MSB_ADDR, 0); |
} |
WREG32(RLC_MC_CNTL, 0); |
WREG32(RLC_UCODE_CNTL, 0); |
|
fw_data = (const __be32 *)rdev->rlc_fw->data; |
if (rdev->family >= CHIP_ARUBA) { |
for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) { |
WREG32(RLC_UCODE_ADDR, i); |
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); |
} |
} else if (rdev->family >= CHIP_CAYMAN) { |
for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) { |
WREG32(RLC_UCODE_ADDR, i); |
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); |
} |
} else if (rdev->family >= CHIP_CEDAR) { |
for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) { |
WREG32(RLC_UCODE_ADDR, i); |
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); |
} |
} else if (rdev->family >= CHIP_RV770) { |
if (rdev->family >= CHIP_RV770) { |
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { |
WREG32(RLC_UCODE_ADDR, i); |
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); |
} |
} else { |
for (i = 0; i < RLC_UCODE_SIZE; i++) { |
for (i = 0; i < R600_RLC_UCODE_SIZE; i++) { |
WREG32(RLC_UCODE_ADDR, i); |
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); |
} |
3453,7 → 3370,10 |
r600_disable_interrupts(rdev); |
|
/* init rlc */ |
ret = r600_rlc_init(rdev); |
if (rdev->family >= CHIP_CEDAR) |
ret = evergreen_rlc_resume(rdev); |
else |
ret = r600_rlc_resume(rdev); |
if (ret) { |
r600_ih_ring_fini(rdev); |
return ret; |
3472,7 → 3392,7 |
WREG32(INTERRUPT_CNTL, interrupt_cntl); |
|
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); |
rb_bufsz = drm_order(rdev->ih.ring_size / 4); |
rb_bufsz = order_base_2(rdev->ih.ring_size / 4); |
|
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | |
IH_WPTR_OVERFLOW_CLEAR | |
3519,8 → 3439,8 |
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; |
u32 grbm_int_cntl = 0; |
u32 hdmi0, hdmi1; |
u32 d1grph = 0, d2grph = 0; |
u32 dma_cntl; |
u32 thermal_int = 0; |
|
if (!rdev->irq.installed) { |
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
3555,8 → 3475,21 |
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; |
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; |
} |
|
dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
|
if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) { |
thermal_int = RREG32(CG_THERMAL_INT) & |
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); |
} else if (rdev->family >= CHIP_RV770) { |
thermal_int = RREG32(RV770_CG_THERMAL_INT) & |
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); |
} |
if (rdev->irq.dpm_thermal) { |
DRM_DEBUG("dpm thermal\n"); |
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; |
} |
|
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { |
DRM_DEBUG("r600_irq_set: sw int\n"); |
cp_int_cntl |= RB_INT_ENABLE; |
3614,8 → 3547,8 |
WREG32(CP_INT_CNTL, cp_int_cntl); |
WREG32(DMA_CNTL, dma_cntl); |
WREG32(DxMODE_INT_MASK, mode_int); |
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); |
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); |
WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); |
WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); |
WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
if (ASIC_IS_DCE3(rdev)) { |
WREG32(DC_HPD1_INT_CONTROL, hpd1); |
3638,6 → 3571,11 |
WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); |
WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1); |
} |
if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) { |
WREG32(CG_THERMAL_INT, thermal_int); |
} else if (rdev->family >= CHIP_RV770) { |
WREG32(RV770_CG_THERMAL_INT, thermal_int); |
} |
|
return 0; |
} |
3787,6 → 3725,7 |
tmp = RREG32(IH_RB_CNTL); |
tmp |= IH_WPTR_OVERFLOW_CLEAR; |
WREG32(IH_RB_CNTL, tmp); |
wptr &= ~RB_OVERFLOW; |
} |
return (wptr & rdev->ih.ptr_mask); |
} |
3831,6 → 3770,7 |
u32 ring_index; |
bool queue_hotplug = false; |
bool queue_hdmi = false; |
bool queue_thermal = false; |
|
if (!rdev->ih.enabled || rdev->shutdown) |
return IRQ_NONE; |
3984,6 → 3924,10 |
break; |
} |
break; |
case 124: /* UVD */ |
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
break; |
case 176: /* CP_INT in ring buffer */ |
case 177: /* CP_INT in IB1 */ |
case 178: /* CP_INT in IB2 */ |
3998,6 → 3942,16 |
DRM_DEBUG("IH: DMA trap\n"); |
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); |
break; |
case 230: /* thermal low to high */ |
DRM_DEBUG("IH: thermal low to high\n"); |
rdev->pm.dpm.thermal.high_to_low = false; |
queue_thermal = true; |
break; |
case 231: /* thermal high to low */ |
DRM_DEBUG("IH: thermal high to low\n"); |
rdev->pm.dpm.thermal.high_to_low = true; |
queue_thermal = true; |
break; |
case 233: /* GUI IDLE */ |
DRM_DEBUG("IH: GUI idle\n"); |
break; |
4053,16 → 4007,15 |
} |
|
/**
 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 *
 * @rdev: radeon_device pointer
 *
 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
 * through the ring buffer. This leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) |
void r600_mmio_hdp_flush(struct radeon_device *rdev) |
{ |
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. |