/drivers/video/drm/radeon/Makefile |
---|
1,11 → 1,10 |
CC = gcc |
LD = ld |
AS = as |
FASM = fasm.exe |
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 |
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6 |
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE |
DRV_TOPDIR = $(CURDIR)/../../.. |
DRM_TOPDIR = $(CURDIR)/.. |
12,10 → 11,13 |
DRV_INCLUDES = $(DRV_TOPDIR)/include |
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \ |
-I$(DRV_INCLUDES)/linux |
INCLUDES = -I$(DRV_INCLUDES) \ |
-I$(DRV_INCLUDES)/asm \ |
-I$(DRV_INCLUDES)/uapi \ |
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES) |
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf |
CFLAGS= -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -fno-ident -fomit-frame-pointer -fno-builtin-printf |
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields |
LIBPATH:= $(DRV_TOPDIR)/ddk |
30,7 → 32,6 |
HFILES:= $(DRV_INCLUDES)/linux/types.h \ |
$(DRV_INCLUDES)/linux/list.h \ |
$(DRV_INCLUDES)/linux/pci.h \ |
$(DRV_INCLUDES)/drm/drm.h \ |
$(DRV_INCLUDES)/drm/drmP.h \ |
$(DRV_INCLUDES)/drm/drm_edid.h \ |
$(DRV_INCLUDES)/drm/drm_crtc.h \ |
54,6 → 55,7 |
$(DRM_TOPDIR)/drm_crtc_helper.c \ |
$(DRM_TOPDIR)/drm_dp_helper.c \ |
$(DRM_TOPDIR)/drm_drv.c \ |
$(DRM_TOPDIR)/drm_atomic.c \ |
$(DRM_TOPDIR)/drm_edid.c \ |
$(DRM_TOPDIR)/drm_fb_helper.c \ |
$(DRM_TOPDIR)/drm_gem.c \ |
123,6 → 125,7 |
radeon_ring.c \ |
radeon_sa.c \ |
radeon_semaphore.c \ |
radeon_sync.c \ |
radeon_test.c \ |
radeon_ttm.c \ |
radeon_ucode.c \ |
139,7 → 142,6 |
rv740_dpm.c \ |
r520.c \ |
r600.c \ |
r600_audio.c \ |
r600_blit_shaders.c \ |
r600_cs.c \ |
r600_dma.c \ |
/drivers/video/drm/radeon/Makefile.lto |
---|
5,15 → 5,17 |
AS = as |
FASM = fasm |
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 |
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6 |
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE |
DDK_TOPDIR = d:\kos\kolibri\drivers\ddk |
DRV_INCLUDES = /d/kos/kolibri/drivers/include |
DRM_TOPDIR = $(CURDIR)/.. |
INCLUDES = -I$(DRV_INCLUDES)/linux/uapi -I$(DRV_INCLUDES)/linux \ |
-I$(DRV_INCLUDES)/linux/asm -I$(DRV_INCLUDES)/drm \ |
-I./ -I$(DRV_INCLUDES) |
INCLUDES = -I$(DRV_INCLUDES) \ |
-I$(DRV_INCLUDES)/asm \ |
-I$(DRV_INCLUDES)/uapi \ |
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES) |
CFLAGS_OPT = -Os -march=i686 -fno-ident -fomit-frame-pointer -fno-builtin-printf -mno-ms-bitfields |
CFLAGS_OPT+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -flto |
34,7 → 36,6 |
HFILES:= $(DRV_INCLUDES)/linux/types.h \ |
$(DRV_INCLUDES)/linux/list.h \ |
$(DRV_INCLUDES)/linux/pci.h \ |
$(DRV_INCLUDES)/drm/drm.h \ |
$(DRV_INCLUDES)/drm/drmP.h \ |
$(DRV_INCLUDES)/drm/drm_edid.h \ |
$(DRV_INCLUDES)/drm/drm_crtc.h \ |
58,6 → 59,7 |
$(DRM_TOPDIR)/drm_crtc_helper.c \ |
$(DRM_TOPDIR)/drm_dp_helper.c \ |
$(DRM_TOPDIR)/drm_drv.c \ |
$(DRM_TOPDIR)/drm_atomic.c \ |
$(DRM_TOPDIR)/drm_edid.c \ |
$(DRM_TOPDIR)/drm_fb_helper.c \ |
$(DRM_TOPDIR)/drm_gem.c \ |
127,6 → 129,7 |
radeon_ring.c \ |
radeon_sa.c \ |
radeon_semaphore.c \ |
radeon_sync.c \ |
radeon_test.c \ |
radeon_ttm.c \ |
radeon_ucode.c \ |
143,7 → 146,6 |
rv740_dpm.c \ |
r520.c \ |
r600.c \ |
r600_audio.c \ |
r600_blit_shaders.c \ |
r600_cs.c \ |
r600_dma.c \ |
/drivers/video/drm/radeon/atom.c |
---|
1215,7 → 1215,7 |
return ret; |
} |
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params) |
{ |
int r; |
1236,6 → 1236,15 |
return r; |
} |
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) |
{ |
int r; |
mutex_lock(&ctx->scratch_mutex); |
r = atom_execute_table_scratch_unlocked(ctx, index, params); |
mutex_unlock(&ctx->scratch_mutex); |
return r; |
} |
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; |
static void atom_index_iio(struct atom_context *ctx, int base) |
/drivers/video/drm/radeon/atom.h |
---|
125,6 → 125,7 |
struct atom_context { |
struct card_info *card; |
struct mutex mutex; |
struct mutex scratch_mutex; |
void *bios; |
uint32_t cmd_table, data_table; |
uint16_t *iio; |
145,6 → 146,7 |
struct atom_context *atom_parse(struct card_info *, void *); |
int atom_execute_table(struct atom_context *, int, uint32_t *); |
int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *); |
int atom_asic_init(struct atom_context *); |
void atom_destroy(struct atom_context *); |
bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, |
/drivers/video/drm/radeon/atombios_crtc.c |
---|
2039,6 → 2039,7 |
atombios_crtc_set_base(crtc, x, y, old_fb); |
atombios_overscan_setup(crtc, mode, adjusted_mode); |
atombios_scaler_setup(crtc); |
// radeon_cursor_reset(crtc); |
/* update the hw version for dpm */ |
radeon_crtc->hw_mode = *adjusted_mode; |
/drivers/video/drm/radeon/atombios_dp.c |
---|
100,6 → 100,7 |
memset(&args, 0, sizeof(args)); |
mutex_lock(&chan->mutex); |
mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); |
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); |
113,7 → 114,7 |
if (ASIC_IS_DCE4(rdev)) |
args.v2.ucHPD_ID = chan->rec.hpd; |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
*ack = args.v1.ucReplyStatus; |
147,6 → 148,7 |
r = recv_bytes; |
done: |
mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex); |
mutex_unlock(&chan->mutex); |
return r; |
232,8 → 234,8 |
/***** general DP utility functions *****/ |
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 |
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 |
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3 |
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3 |
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], |
int lane_count, |
/drivers/video/drm/radeon/atombios_encoders.c |
---|
291,29 → 291,6 |
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, |
struct drm_display_mode *mode); |
static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) |
{ |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
switch (radeon_encoder->encoder_id) { |
case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
case ENCODER_OBJECT_ID_INTERNAL_LVTM1: |
case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
case ENCODER_OBJECT_ID_INTERNAL_DDI: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
return true; |
default: |
return false; |
} |
} |
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, |
const struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
/drivers/video/drm/radeon/atombios_i2c.c |
---|
48,6 → 48,7 |
memset(&args, 0, sizeof(args)); |
mutex_lock(&chan->mutex); |
mutex_lock(&rdev->mode_info.atom_context->scratch_mutex); |
base = (unsigned char *)rdev->mode_info.atom_context->scratch; |
82,7 → 83,7 |
args.ucSlaveAddr = slave_addr << 1; |
args.ucLineNumber = chan->rec.i2c_id; |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
/* error */ |
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { |
95,6 → 96,7 |
radeon_atom_copy_swap(buf, base, num, false); |
done: |
mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex); |
mutex_unlock(&chan->mutex); |
return r; |
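Taken together, the atom.c/atom.h changes and the two atombios_* hunks above introduce a second lock, scratch_mutex, that serializes every use of the shared ATOM scratch buffer. Callers that perform multi-step transactions (the DP AUX and hw-assisted i2c paths) hold it across the whole transaction and run the command table through the new unlocked variant. A minimal caller-side sketch, using only helpers that appear in this diff:

/* Illustrative sketch of the lock ordering used by the AUX/i2c paths:
 * channel mutex first, then the atom context's scratch_mutex, then the
 * _scratch_unlocked execute so the scratch buffer stays consistent for
 * the whole transaction. */
mutex_lock(&chan->mutex);
mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);

atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context,
                                    index, (uint32_t *)&args);

/* ... read results back out of the scratch buffer ... */

mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
mutex_unlock(&chan->mutex);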
/drivers/video/drm/radeon/btc_dpm.c |
---|
24,6 → 24,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "btcd.h" |
#include "r600_dpm.h" |
#include "cypress_dpm.h" |
2099,7 → 2100,6 |
bool disable_mclk_switching; |
u32 mclk, sclk; |
u16 vddc, vddci; |
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; |
if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
btc_dpm_vblank_too_short(rdev)) |
2141,39 → 2141,6 |
ps->low.vddci = max_limits->vddci; |
} |
/* limit clocks to max supported clocks based on voltage dependency tables */ |
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, |
&max_sclk_vddc); |
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, |
&max_mclk_vddci); |
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, |
&max_mclk_vddc); |
if (max_sclk_vddc) { |
if (ps->low.sclk > max_sclk_vddc) |
ps->low.sclk = max_sclk_vddc; |
if (ps->medium.sclk > max_sclk_vddc) |
ps->medium.sclk = max_sclk_vddc; |
if (ps->high.sclk > max_sclk_vddc) |
ps->high.sclk = max_sclk_vddc; |
} |
if (max_mclk_vddci) { |
if (ps->low.mclk > max_mclk_vddci) |
ps->low.mclk = max_mclk_vddci; |
if (ps->medium.mclk > max_mclk_vddci) |
ps->medium.mclk = max_mclk_vddci; |
if (ps->high.mclk > max_mclk_vddci) |
ps->high.mclk = max_mclk_vddci; |
} |
if (max_mclk_vddc) { |
if (ps->low.mclk > max_mclk_vddc) |
ps->low.mclk = max_mclk_vddc; |
if (ps->medium.mclk > max_mclk_vddc) |
ps->medium.mclk = max_mclk_vddc; |
if (ps->high.mclk > max_mclk_vddc) |
ps->high.mclk = max_mclk_vddc; |
} |
/* XXX validate the min clocks required for display */ |
if (disable_mclk_switching) { |
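For reference, the block removed above (and its counterpart removed from ci_dpm.c below) clamped each power-state level to the highest clock listed in the voltage dependency tables. A condensed recap of the removed logic, reconstructed from this hunk:

/* Recap of the removed clamping (reconstructed; no longer in the code):
 * query the max clock each dependency table supports, then cap every
 * level's clock at that value. */
u32 max_sclk_vddc;

btc_get_max_clock_from_voltage_dependency_table(
        &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, &max_sclk_vddc);
if (max_sclk_vddc) {
        if (ps->low.sclk > max_sclk_vddc)
                ps->low.sclk = max_sclk_vddc;
        /* ... same clamp for the medium and high levels ... */
}
/* mclk was clamped the same way against vddci_dependency_on_mclk and
 * vddc_dependency_on_mclk. */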
/drivers/video/drm/radeon/ci_dpm.c |
---|
24,6 → 24,7 |
#include <linux/firmware.h> |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_ucode.h" |
#include "cikd.h" |
#include "r600_dpm.h" |
45,15 → 46,15 |
static const struct ci_pt_defaults defaults_hawaii_xt = |
{ |
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, |
{ 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 }, |
{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC } |
{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, |
{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } |
}; |
static const struct ci_pt_defaults defaults_hawaii_pro = |
{ |
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, |
{ 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 }, |
{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC } |
{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, |
{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } |
}; |
static const struct ci_pt_defaults defaults_bonaire_xt = |
162,8 → 163,6 |
}; |
extern u8 rv770_get_memory_module_index(struct radeon_device *rdev); |
extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, |
u32 *max_clock); |
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev, |
u32 arb_freq_src, u32 arb_freq_dest); |
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock); |
185,6 → 184,9 |
u32 target_tdp); |
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate); |
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev, |
PPSMC_Msg msg, u32 parameter); |
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = rdev->pm.dpm.priv; |
250,6 → 252,9 |
if (pi->caps_power_containment) { |
pi->caps_cac = true; |
if (rdev->family == CHIP_HAWAII) |
pi->enable_bapm_feature = false; |
else |
pi->enable_bapm_feature = true; |
pi->enable_tdc_limit_feature = true; |
pi->enable_pkg_pwr_tracking_feature = true; |
353,6 → 358,21 |
return 0; |
} |
static int ci_populate_fuzzy_fan(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) || |
(rdev->pm.dpm.fan.fan_output_sensitivity == 0)) |
rdev->pm.dpm.fan.fan_output_sensitivity = |
rdev->pm.dpm.fan.default_fan_output_sensitivity; |
pi->smc_powertune_table.FuzzyFan_PwmSetDelta = |
cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity); |
return 0; |
} |
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
478,6 → 498,9 |
ret = ci_populate_dw8(rdev); |
if (ret) |
return ret; |
ret = ci_populate_fuzzy_fan(rdev); |
if (ret) |
return ret; |
ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev); |
if (ret) |
return ret; |
691,6 → 714,25 |
return ret; |
} |
static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev, |
bool enable) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
PPSMC_Result smc_result = PPSMC_Result_OK; |
if (pi->thermal_sclk_dpm_enabled) { |
if (enable) |
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM); |
else |
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM); |
} |
if (smc_result == PPSMC_Result_OK) |
return 0; |
else |
return -EINVAL; |
} |
static int ci_power_control_set_level(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
701,13 → 743,11 |
int ret = 0; |
bool adjust_polarity = false; /* ??? */ |
if (pi->caps_power_containment && |
(pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) { |
if (pi->caps_power_containment) { |
adjust_percent = adjust_polarity ? |
rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment); |
target_tdp = ((100 + adjust_percent) * |
(s32)cac_tdp_table->configurable_tdp) / 100; |
target_tdp *= 256; |
ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp); |
} |
748,7 → 788,6 |
struct radeon_clock_and_voltage_limits *max_limits; |
bool disable_mclk_switching; |
u32 sclk, mclk; |
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; |
int i; |
if (rps->vce_active) { |
784,29 → 823,6 |
} |
} |
/* limit clocks to max supported clocks based on voltage dependency tables */ |
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, |
&max_sclk_vddc); |
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, |
&max_mclk_vddci); |
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, |
&max_mclk_vddc); |
for (i = 0; i < ps->performance_level_count; i++) { |
if (max_sclk_vddc) { |
if (ps->performance_levels[i].sclk > max_sclk_vddc) |
ps->performance_levels[i].sclk = max_sclk_vddc; |
} |
if (max_mclk_vddci) { |
if (ps->performance_levels[i].mclk > max_mclk_vddci) |
ps->performance_levels[i].mclk = max_mclk_vddci; |
} |
if (max_mclk_vddc) { |
if (ps->performance_levels[i].mclk > max_mclk_vddc) |
ps->performance_levels[i].mclk = max_mclk_vddc; |
} |
} |
/* XXX validate the min clocks required for display */ |
if (disable_mclk_switching) { |
839,7 → 855,7 |
} |
} |
static int ci_set_thermal_temperature_range(struct radeon_device *rdev, |
static int ci_thermal_set_temperature_range(struct radeon_device *rdev, |
int min_temp, int max_temp) |
{ |
int low_temp = 0 * 1000; |
875,7 → 891,351 |
return 0; |
} |
static int ci_thermal_enable_alert(struct radeon_device *rdev, |
bool enable) |
{ |
u32 thermal_int = RREG32_SMC(CG_THERMAL_INT); |
PPSMC_Result result; |
if (enable) { |
thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); |
WREG32_SMC(CG_THERMAL_INT, thermal_int); |
rdev->irq.dpm_thermal = false; |
result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable); |
if (result != PPSMC_Result_OK) { |
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); |
return -EINVAL; |
} |
} else { |
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; |
WREG32_SMC(CG_THERMAL_INT, thermal_int); |
rdev->irq.dpm_thermal = true; |
result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable); |
if (result != PPSMC_Result_OK) { |
DRM_DEBUG_KMS("Could not disable thermal interrupts.\n"); |
return -EINVAL; |
} |
} |
return 0; |
} |
static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
u32 tmp; |
if (pi->fan_ctrl_is_in_default_mode) { |
tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; |
pi->fan_ctrl_default_mode = tmp; |
tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; |
pi->t_min = tmp; |
pi->fan_ctrl_is_in_default_mode = false; |
} |
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK; |
tmp |= TMIN(0); |
WREG32_SMC(CG_FDO_CTRL2, tmp); |
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; |
tmp |= FDO_PWM_MODE(mode); |
WREG32_SMC(CG_FDO_CTRL2, tmp); |
} |
static int ci_thermal_setup_fan_table(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; |
u32 duty100; |
u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; |
u16 fdo_min, slope1, slope2; |
u32 reference_clock, tmp; |
int ret; |
u64 tmp64; |
if (!pi->fan_table_start) { |
rdev->pm.dpm.fan.ucode_fan_control = false; |
return 0; |
} |
duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; |
if (duty100 == 0) { |
rdev->pm.dpm.fan.ucode_fan_control = false; |
return 0; |
} |
tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100; |
do_div(tmp64, 10000); |
fdo_min = (u16)tmp64; |
t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min; |
t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med; |
pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min; |
pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med; |
slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); |
slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); |
fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100); |
fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100); |
fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100); |
fan_table.Slope1 = cpu_to_be16(slope1); |
fan_table.Slope2 = cpu_to_be16(slope2); |
fan_table.FdoMin = cpu_to_be16(fdo_min); |
fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst); |
fan_table.HystUp = cpu_to_be16(1); |
fan_table.HystSlope = cpu_to_be16(1); |
fan_table.TempRespLim = cpu_to_be16(5); |
reference_clock = radeon_get_xclk(rdev); |
fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay * |
reference_clock) / 1600); |
fan_table.FdoMax = cpu_to_be16((u16)duty100); |
tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; |
fan_table.TempSrc = (uint8_t)tmp; |
ret = ci_copy_bytes_to_smc(rdev, |
pi->fan_table_start, |
(u8 *)(&fan_table), |
sizeof(fan_table), |
pi->sram_end); |
if (ret) { |
DRM_ERROR("Failed to load fan table to the SMC."); |
rdev->pm.dpm.fan.ucode_fan_control = false; |
} |
return 0; |
} |
static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
PPSMC_Result ret; |
if (pi->caps_od_fuzzy_fan_control_support) { |
ret = ci_send_msg_to_smc_with_parameter(rdev, |
PPSMC_StartFanControl, |
FAN_CONTROL_FUZZY); |
if (ret != PPSMC_Result_OK) |
return -EINVAL; |
ret = ci_send_msg_to_smc_with_parameter(rdev, |
PPSMC_MSG_SetFanPwmMax, |
rdev->pm.dpm.fan.default_max_fan_pwm); |
if (ret != PPSMC_Result_OK) |
return -EINVAL; |
} else { |
ret = ci_send_msg_to_smc_with_parameter(rdev, |
PPSMC_StartFanControl, |
FAN_CONTROL_TABLE); |
if (ret != PPSMC_Result_OK) |
return -EINVAL; |
} |
return 0; |
} |
#if 0 |
static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev) |
{ |
PPSMC_Result ret; |
ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl); |
if (ret == PPSMC_Result_OK) |
return 0; |
else |
return -EINVAL; |
} |
static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, |
u32 *speed) |
{ |
u32 duty, duty100; |
u64 tmp64; |
if (rdev->pm.no_fan) |
return -ENOENT; |
duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; |
duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; |
if (duty100 == 0) |
return -EINVAL; |
tmp64 = (u64)duty * 100; |
do_div(tmp64, duty100); |
*speed = (u32)tmp64; |
if (*speed > 100) |
*speed = 100; |
return 0; |
} |
static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, |
u32 speed) |
{ |
u32 tmp; |
u32 duty, duty100; |
u64 tmp64; |
if (rdev->pm.no_fan) |
return -ENOENT; |
if (speed > 100) |
return -EINVAL; |
if (rdev->pm.dpm.fan.ucode_fan_control) |
ci_fan_ctrl_stop_smc_fan_control(rdev); |
duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; |
if (duty100 == 0) |
return -EINVAL; |
tmp64 = (u64)speed * duty100; |
do_div(tmp64, 100); |
duty = (u32)tmp64; |
tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; |
tmp |= FDO_STATIC_DUTY(duty); |
WREG32_SMC(CG_FDO_CTRL0, tmp); |
ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); |
return 0; |
} |
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev, |
u32 *speed) |
{ |
u32 tach_period; |
u32 xclk = radeon_get_xclk(rdev); |
if (rdev->pm.no_fan) |
return -ENOENT; |
if (rdev->pm.fan_pulses_per_revolution == 0) |
return -ENOENT; |
tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; |
if (tach_period == 0) |
return -ENOENT; |
*speed = 60 * xclk * 10000 / tach_period; |
return 0; |
} |
static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev, |
u32 speed) |
{ |
u32 tach_period, tmp; |
u32 xclk = radeon_get_xclk(rdev); |
if (rdev->pm.no_fan) |
return -ENOENT; |
if (rdev->pm.fan_pulses_per_revolution == 0) |
return -ENOENT; |
if ((speed < rdev->pm.fan_min_rpm) || |
(speed > rdev->pm.fan_max_rpm)) |
return -EINVAL; |
if (rdev->pm.dpm.fan.ucode_fan_control) |
ci_fan_ctrl_stop_smc_fan_control(rdev); |
tach_period = 60 * xclk * 10000 / (8 * speed); |
tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; |
tmp |= TARGET_PERIOD(tach_period); |
WREG32_SMC(CG_TACH_CTRL, tmp); |
ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM); |
return 0; |
} |
#endif |
static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
u32 tmp; |
if (!pi->fan_ctrl_is_in_default_mode) { |
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; |
tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode); |
WREG32_SMC(CG_FDO_CTRL2, tmp); |
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK; |
tmp |= TMIN(pi->t_min); |
WREG32_SMC(CG_FDO_CTRL2, tmp); |
pi->fan_ctrl_is_in_default_mode = true; |
} |
} |
static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev) |
{ |
if (rdev->pm.dpm.fan.ucode_fan_control) { |
ci_fan_ctrl_start_smc_fan_control(rdev); |
ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); |
} |
} |
static void ci_thermal_initialize(struct radeon_device *rdev) |
{ |
u32 tmp; |
if (rdev->pm.fan_pulses_per_revolution) { |
tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; |
tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1); |
WREG32_SMC(CG_TACH_CTRL, tmp); |
} |
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; |
tmp |= TACH_PWM_RESP_RATE(0x28); |
WREG32_SMC(CG_FDO_CTRL2, tmp); |
} |
static int ci_thermal_start_thermal_controller(struct radeon_device *rdev) |
{ |
int ret; |
ci_thermal_initialize(rdev); |
ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
if (ret) |
return ret; |
ret = ci_thermal_enable_alert(rdev, true); |
if (ret) |
return ret; |
if (rdev->pm.dpm.fan.ucode_fan_control) { |
ret = ci_thermal_setup_fan_table(rdev); |
if (ret) |
return ret; |
ci_thermal_start_smc_fan_control(rdev); |
} |
return 0; |
} |
static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev) |
{ |
if (!rdev->pm.no_fan) |
ci_fan_ctrl_set_default_mode(rdev); |
} |
#if 0 |
static int ci_read_smc_soft_register(struct radeon_device *rdev, |
u16 reg_offset, u32 *value) |
{ |
1278,7 → 1638,7 |
if (!pi->sclk_dpm_key_disabled) { |
PPSMC_Result smc_result = |
ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n); |
ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n); |
if (smc_result != PPSMC_Result_OK) |
return -EINVAL; |
} |
1292,7 → 1652,7 |
if (!pi->mclk_dpm_key_disabled) { |
PPSMC_Result smc_result = |
ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n); |
ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n); |
if (smc_result != PPSMC_Result_OK) |
return -EINVAL; |
} |
2067,6 → 2427,33 |
return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); |
} |
static void ci_register_patching_mc_arb(struct radeon_device *rdev, |
const u32 engine_clock, |
const u32 memory_clock, |
u32 *dram_timimg2) |
{ |
bool patch; |
u32 tmp, tmp2; |
tmp = RREG32(MC_SEQ_MISC0); |
patch = ((tmp & 0x0000f00) == 0x300) ? true : false; |
if (patch && |
((rdev->pdev->device == 0x67B0) || |
(rdev->pdev->device == 0x67B1))) { |
if ((memory_clock > 100000) && (memory_clock <= 125000)) { |
tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff; |
*dram_timimg2 &= ~0x00ff0000; |
*dram_timimg2 |= tmp2 << 16; |
} else if ((memory_clock > 125000) && (memory_clock <= 137500)) { |
tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff; |
*dram_timimg2 &= ~0x00ff0000; |
*dram_timimg2 |= tmp2 << 16; |
} |
} |
} |
static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, |
u32 sclk, |
u32 mclk, |
2082,6 → 2469,8 |
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); |
burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; |
ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2); |
arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); |
arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); |
arb_regs->McArbBurstTime = (u8)burst_time; |
2376,10 → 2765,10 |
u32 tmp; |
u32 reference_clock = rdev->clock.mpll.reference_freq; |
if (pi->mem_gddr5) |
freq_nom = memory_clock * 4; |
if (mpll_param.qdr == 1) |
freq_nom = memory_clock * 4 * (1 << mpll_param.post_div); |
else |
freq_nom = memory_clock * 2; |
freq_nom = memory_clock * 2 * (1 << mpll_param.post_div); |
tmp = (freq_nom / reference_clock); |
tmp = tmp * tmp; |
2459,7 → 2848,6 |
&memory_level->MinVddcPhases); |
memory_level->EnabledForThrottle = 1; |
memory_level->EnabledForActivity = 1; |
memory_level->UpH = 0; |
memory_level->DownH = 100; |
memory_level->VoltageDownH = 0; |
2792,7 → 3180,6 |
graphic_level->CcPwrDynRm = 0; |
graphic_level->CcPwrDynRm1 = 0; |
graphic_level->EnabledForActivity = 1; |
graphic_level->EnabledForThrottle = 1; |
graphic_level->UpH = 0; |
graphic_level->DownH = 0; |
2841,10 → 3228,13 |
&pi->smc_state_table.GraphicsLevel[i]); |
if (ret) |
return ret; |
if (i > 1) |
pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; |
if (i == (dpm_table->sclk_table.count - 1)) |
pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = |
PPSMC_DISPLAY_WATERMARK_HIGH; |
} |
pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; |
pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; |
pi->dpm_level_enable_mask.sclk_dpm_enable_mask = |
2888,6 → 3278,16 |
return ret; |
} |
pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1; |
if ((dpm_table->mclk_table.count >= 2) && |
((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) { |
pi->smc_state_table.MemoryLevel[1].MinVddc = |
pi->smc_state_table.MemoryLevel[0].MinVddc; |
pi->smc_state_table.MemoryLevel[1].MinVddcPhases = |
pi->smc_state_table.MemoryLevel[0].MinVddcPhases; |
} |
pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); |
pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; |
2944,8 → 3344,13 |
&pi->dpm_table.pcie_speed_table, |
SMU7_MAX_LEVELS_LINK); |
if (rdev->family == CHIP_BONAIRE) |
ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, |
pi->pcie_gen_powersaving.min, |
pi->pcie_lane_powersaving.max); |
else |
ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, |
pi->pcie_gen_powersaving.min, |
pi->pcie_lane_powersaving.min); |
ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, |
pi->pcie_gen_performance.min, |
3013,7 → 3418,8 |
allowed_sclk_vddc_table->entries[i].clk)) { |
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = |
allowed_sclk_vddc_table->entries[i].clk; |
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true; |
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = |
(i == 0) ? true : false; |
pi->dpm_table.sclk_table.count++; |
} |
} |
3025,7 → 3431,8 |
allowed_mclk_table->entries[i].clk)) { |
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = |
allowed_mclk_table->entries[i].clk; |
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true; |
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = |
(i == 0) ? true : false; |
pi->dpm_table.mclk_table.count++; |
} |
} |
3191,7 → 3598,7 |
table->VddcVddciDelta = 4000; |
table->PhaseResponseTime = 0; |
table->MemoryThermThrottleEnable = 1; |
table->PCIeBootLinkLevel = 0; |
table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1; |
table->PCIeGenInterval = 1; |
if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) |
table->SVI2Enable = 1; |
3345,6 → 3752,8 |
struct ci_power_info *pi = ci_get_pi(rdev); |
PPSMC_Result result; |
ci_apply_disp_minimum_voltage_request(rdev); |
if (!pi->sclk_dpm_key_disabled) { |
if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
result = ci_send_msg_to_smc_with_parameter(rdev, |
3364,7 → 3773,7 |
return -EINVAL; |
} |
} |
#if 0 |
if (!pi->pcie_dpm_key_disabled) { |
if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { |
result = ci_send_msg_to_smc_with_parameter(rdev, |
3374,9 → 3783,7 |
return -EINVAL; |
} |
} |
ci_apply_disp_minimum_voltage_request(rdev); |
#endif |
return 0; |
} |
3402,7 → 3809,7 |
pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; |
} else { |
/* XXX check display min clock requirements */ |
if (0 != CISLAND_MINIMUM_ENGINE_CLOCK) |
if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) |
pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; |
} |
3732,24 → 4139,23 |
enum radeon_dpm_forced_level level) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
PPSMC_Result smc_result; |
u32 tmp, levels, i; |
int ret; |
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { |
if ((!pi->sclk_dpm_key_disabled) && |
pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
if ((!pi->pcie_dpm_key_disabled) && |
pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { |
levels = 0; |
tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; |
tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; |
while (tmp >>= 1) |
levels++; |
if (levels) { |
ret = ci_dpm_force_state_sclk(rdev, levels); |
ret = ci_dpm_force_state_pcie(rdev, level); |
if (ret) |
return ret; |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & |
CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; |
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & |
CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; |
if (tmp == levels) |
break; |
udelay(1); |
3756,19 → 4162,19 |
} |
} |
} |
if ((!pi->mclk_dpm_key_disabled) && |
pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { |
if ((!pi->sclk_dpm_key_disabled) && |
pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { |
levels = 0; |
tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; |
tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; |
while (tmp >>= 1) |
levels++; |
if (levels) { |
ret = ci_dpm_force_state_mclk(rdev, levels); |
ret = ci_dpm_force_state_sclk(rdev, levels); |
if (ret) |
return ret; |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & |
CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; |
CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; |
if (tmp == levels) |
break; |
udelay(1); |
3775,19 → 4181,19 |
} |
} |
} |
if ((!pi->pcie_dpm_key_disabled) && |
pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { |
if ((!pi->mclk_dpm_key_disabled) && |
pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { |
levels = 0; |
tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; |
tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; |
while (tmp >>= 1) |
levels++; |
if (levels) { |
ret = ci_dpm_force_state_pcie(rdev, level); |
ret = ci_dpm_force_state_mclk(rdev, levels); |
if (ret) |
return ret; |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & |
CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; |
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & |
CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; |
if (tmp == levels) |
break; |
udelay(1); |
3841,21 → 4247,17 |
} |
} |
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { |
if (!pi->sclk_dpm_key_disabled) { |
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel); |
if (smc_result != PPSMC_Result_OK) |
return -EINVAL; |
} |
if (!pi->mclk_dpm_key_disabled) { |
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel); |
if (smc_result != PPSMC_Result_OK) |
return -EINVAL; |
} |
if (!pi->pcie_dpm_key_disabled) { |
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel); |
PPSMC_Result smc_result; |
smc_result = ci_send_msg_to_smc(rdev, |
PPSMC_MSG_PCIeDPM_UnForceLevel); |
if (smc_result != PPSMC_Result_OK) |
return -EINVAL; |
} |
ret = ci_upload_dpm_level_enable_mask(rdev); |
if (ret) |
return ret; |
} |
rdev->pm.dpm.forced_level = level; |
4061,6 → 4463,96 |
return 0; |
} |
static int ci_register_patching_mc_seq(struct radeon_device *rdev, |
struct ci_mc_reg_table *table) |
{ |
u8 i, k; |
u32 tmp; |
bool patch; |
tmp = RREG32(MC_SEQ_MISC0); |
patch = ((tmp & 0x0000f00) == 0x300) ? true : false; |
if (patch && |
((rdev->pdev->device == 0x67B0) || |
(rdev->pdev->device == 0x67B1))) { |
for (i = 0; i < table->last; i++) { |
if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) |
return -EINVAL; |
switch(table->mc_reg_address[i].s1 >> 2) { |
case MC_SEQ_MISC1: |
for (k = 0; k < table->num_entries; k++) { |
if ((table->mc_reg_table_entry[k].mclk_max == 125000) || |
(table->mc_reg_table_entry[k].mclk_max == 137500)) |
table->mc_reg_table_entry[k].mc_data[i] = |
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) | |
0x00000007; |
} |
break; |
case MC_SEQ_WR_CTL_D0: |
for (k = 0; k < table->num_entries; k++) { |
if ((table->mc_reg_table_entry[k].mclk_max == 125000) || |
(table->mc_reg_table_entry[k].mclk_max == 137500)) |
table->mc_reg_table_entry[k].mc_data[i] = |
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | |
0x0000D0DD; |
} |
break; |
case MC_SEQ_WR_CTL_D1: |
for (k = 0; k < table->num_entries; k++) { |
if ((table->mc_reg_table_entry[k].mclk_max == 125000) || |
(table->mc_reg_table_entry[k].mclk_max == 137500)) |
table->mc_reg_table_entry[k].mc_data[i] = |
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | |
0x0000D0DD; |
} |
break; |
case MC_SEQ_WR_CTL_2: |
for (k = 0; k < table->num_entries; k++) { |
if ((table->mc_reg_table_entry[k].mclk_max == 125000) || |
(table->mc_reg_table_entry[k].mclk_max == 137500)) |
table->mc_reg_table_entry[k].mc_data[i] = 0; |
} |
break; |
case MC_SEQ_CAS_TIMING: |
for (k = 0; k < table->num_entries; k++) { |
if (table->mc_reg_table_entry[k].mclk_max == 125000) |
table->mc_reg_table_entry[k].mc_data[i] = |
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | |
0x000C0140; |
else if (table->mc_reg_table_entry[k].mclk_max == 137500) |
table->mc_reg_table_entry[k].mc_data[i] = |
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | |
0x000C0150; |
} |
break; |
case MC_SEQ_MISC_TIMING: |
for (k = 0; k < table->num_entries; k++) { |
if (table->mc_reg_table_entry[k].mclk_max == 125000) |
table->mc_reg_table_entry[k].mc_data[i] = |
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | |
0x00000030; |
else if (table->mc_reg_table_entry[k].mclk_max == 137500) |
table->mc_reg_table_entry[k].mc_data[i] = |
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | |
0x00000035; |
} |
break; |
default: |
break; |
} |
} |
WREG32(MC_SEQ_IO_DEBUG_INDEX, 3); |
tmp = RREG32(MC_SEQ_IO_DEBUG_DATA); |
tmp = (tmp & 0xFFF8FFFF) | (1 << 16); |
WREG32(MC_SEQ_IO_DEBUG_INDEX, 3); |
WREG32(MC_SEQ_IO_DEBUG_DATA, tmp); |
} |
return 0; |
} |
static int ci_initialize_mc_reg_table(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
4104,6 → 4596,10 |
ci_set_s0_mc_reg_index(ci_table); |
ret = ci_register_patching_mc_seq(rdev, ci_table); |
if (ret) |
goto init_mc_done; |
ret = ci_set_mc_special_registers(rdev, ci_table); |
if (ret) |
goto init_mc_done; |
4700,37 → 5196,52 |
return ret; |
} |
ret = ci_power_control_set_level(rdev); |
if (ret) { |
DRM_ERROR("ci_power_control_set_level failed\n"); |
return ret; |
} |
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
ret = ci_enable_thermal_based_sclk_dpm(rdev, true); |
if (ret) { |
DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n"); |
return ret; |
} |
ci_thermal_start_thermal_controller(rdev); |
ci_update_current_ps(rdev, boot_ps); |
return 0; |
} |
int ci_dpm_late_enable(struct radeon_device *rdev) |
static int ci_set_temperature_range(struct radeon_device *rdev) |
{ |
int ret; |
if (rdev->irq.installed && |
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
#if 0 |
PPSMC_Result result; |
#endif |
ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
if (ret) { |
DRM_ERROR("ci_set_thermal_temperature_range failed\n"); |
ret = ci_thermal_enable_alert(rdev, false); |
if (ret) |
return ret; |
} |
rdev->irq.dpm_thermal = true; |
radeon_irq_set(rdev); |
#if 0 |
result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); |
ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
if (ret) |
return ret; |
ret = ci_thermal_enable_alert(rdev, true); |
if (ret) |
return ret; |
if (result != PPSMC_Result_OK) |
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); |
#endif |
return ret; |
} |
int ci_dpm_late_enable(struct radeon_device *rdev) |
{ |
int ret; |
ret = ci_set_temperature_range(rdev); |
if (ret) |
return ret; |
ci_dpm_powergate_uvd(rdev, true); |
return 0; |
4746,6 → 5257,8 |
if (!ci_is_smc_running(rdev)) |
return; |
ci_thermal_stop_thermal_controller(rdev); |
if (pi->thermal_protection) |
ci_enable_thermal_protection(rdev, false); |
ci_enable_power_containment(rdev, false); |
4754,12 → 5267,13 |
ci_enable_spread_spectrum(rdev, false); |
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); |
ci_stop_dpm(rdev); |
ci_enable_ds_master_switch(rdev, true); |
ci_enable_ds_master_switch(rdev, false); |
ci_enable_ulv(rdev, false); |
ci_clear_vc(rdev); |
ci_reset_to_default(rdev); |
ci_dpm_stop_smc(rdev); |
ci_force_switch_to_arb_f0(rdev); |
ci_enable_thermal_based_sclk_dpm(rdev, false); |
ci_update_current_ps(rdev, boot_ps); |
} |
4829,11 → 5343,6 |
return 0; |
} |
int ci_dpm_power_control_set_level(struct radeon_device *rdev) |
{ |
return ci_power_control_set_level(rdev); |
} |
void ci_dpm_reset_asic(struct radeon_device *rdev) |
{ |
ci_set_boot_state(rdev); |
5093,6 → 5602,8 |
int ci_dpm_init(struct radeon_device *rdev) |
{ |
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); |
SMU7_Discrete_DpmTable *dpm_table; |
struct radeon_gpio_rec gpio; |
u16 data_offset, size; |
u8 frev, crev; |
struct ci_power_info *pi; |
5162,6 → 5673,7 |
pi->sclk_dpm_key_disabled = 0; |
pi->mclk_dpm_key_disabled = 0; |
pi->pcie_dpm_key_disabled = 0; |
pi->thermal_sclk_dpm_enabled = 0; |
/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */ |
if ((rdev->pdev->device == 0x6658) && |
5226,6 → 5738,55 |
pi->uvd_enabled = false; |
dpm_table = &pi->smc_state_table; |
gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID); |
if (gpio.valid) { |
dpm_table->VRHotGpio = gpio.shift; |
rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; |
} else { |
dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN; |
rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; |
} |
gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID); |
if (gpio.valid) { |
dpm_table->AcDcGpio = gpio.shift; |
rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC; |
} else { |
dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN; |
rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC; |
} |
gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID); |
if (gpio.valid) { |
u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL); |
switch (gpio.shift) { |
case 0: |
tmp &= ~GNB_SLOW_MODE_MASK; |
tmp |= GNB_SLOW_MODE(1); |
break; |
case 1: |
tmp &= ~GNB_SLOW_MODE_MASK; |
tmp |= GNB_SLOW_MODE(2); |
break; |
case 2: |
tmp |= GNB_SLOW; |
break; |
case 3: |
tmp |= FORCE_NB_PS1; |
break; |
case 4: |
tmp |= DPM_ENABLED; |
break; |
default: |
DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift); |
break; |
} |
WREG32_SMC(CNB_PWRMGT_CNTL, tmp); |
} |
pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; |
pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; |
pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; |
5287,6 → 5848,8 |
rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = |
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
pi->fan_ctrl_is_in_default_mode = true; |
return 0; |
} |
5293,9 → 5856,13 |
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
struct seq_file *m) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
struct radeon_ps *rps = &pi->current_rps; |
u32 sclk = ci_get_average_sclk_freq(rdev); |
u32 mclk = ci_get_average_mclk_freq(rdev); |
seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis"); |
seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis"); |
seq_printf(m, "power level avg sclk: %u mclk: %u\n", |
sclk, mclk); |
} |
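One non-obvious detail in the new fan-control code added above is the fixed-point slope math in ci_thermal_setup_fan_table(). A worked example with assumed inputs (the unit interpretation is inferred from the divides in the surrounding code, not from documentation):

/* Worked example (assumed values):
 *   duty100   = 255   -- max duty from CG_FDO_CTRL1
 *   pwm_diff1 = 1000  -- pwm_med - pwm_min, in 0.01% units (the fdo_min
 *                        calculation divides pwm_min * duty100 by 10000)
 *   t_diff1   = 2500  -- t_med - t_min, in 0.01 degC units (the TempMin
 *                        field is (50 + t_min) / 100)
 *
 *   slope1 = (50 + (16 * duty100 * pwm_diff1) / t_diff1) / 100
 *          = (50 + (16 * 255 * 1000) / 2500) / 100
 *          = (50 + 1632) / 100
 *          = 16
 *
 * The factor of 16 scales the duty-per-degree ratio into the SMC's
 * fixed-point slope format (an assumption based on the surrounding
 * code), and the "+50 ... / 100" rounds the result to the nearest
 * integer. */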
/drivers/video/drm/radeon/ci_dpm.h |
---|
33,6 → 33,8 |
#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2 |
#define CISLANDS_UNUSED_GPIO_PIN 0x7F |
struct ci_pl { |
u32 mclk; |
u32 sclk; |
237,6 → 239,7 |
u32 sclk_dpm_key_disabled; |
u32 mclk_dpm_key_disabled; |
u32 pcie_dpm_key_disabled; |
u32 thermal_sclk_dpm_enabled; |
struct ci_pcie_perf_range pcie_gen_performance; |
struct ci_pcie_perf_range pcie_lane_performance; |
struct ci_pcie_perf_range pcie_gen_powersaving; |
264,6 → 267,7 |
bool caps_automatic_dc_transition; |
bool caps_sclk_throttle_low_notification; |
bool caps_dynamic_ac_timing; |
bool caps_od_fuzzy_fan_control_support; |
/* flags */ |
bool thermal_protection; |
bool pcie_performance_request; |
285,6 → 289,10 |
struct ci_ps current_ps; |
struct radeon_ps requested_rps; |
struct ci_ps requested_ps; |
/* fan control */ |
bool fan_ctrl_is_in_default_mode; |
u32 t_min; |
u32 fan_ctrl_default_mode; |
}; |
#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0 |
/drivers/video/drm/radeon/ci_smc.c |
---|
129,7 → 129,7 |
int ci_program_jump_on_start(struct radeon_device *rdev) |
{ |
static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 }; |
static const u8 data[] = { 0xE0, 0x00, 0x80, 0x40 }; |
return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1); |
} |
/drivers/video/drm/radeon/cik.c |
---|
32,6 → 32,7 |
#include "cik_blit_shaders.h" |
#include "radeon_ucode.h" |
#include "clearstate_ci.h" |
#include "radeon_kfd.h" |
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin"); |
MODULE_FIRMWARE("radeon/BONAIRE_me.bin"); |
1563,6 → 1564,8 |
static void cik_init_golden_registers(struct radeon_device *rdev) |
{ |
/* Some of the registers might be dependent on GRBM_GFX_INDEX */ |
mutex_lock(&rdev->grbm_idx_mutex); |
switch (rdev->family) { |
case CHIP_BONAIRE: |
radeon_program_register_sequence(rdev, |
1637,6 → 1640,7 |
default: |
break; |
} |
mutex_unlock(&rdev->grbm_idx_mutex); |
} |
/** |
1806,7 → 1810,7 |
{ |
const __be32 *fw_data = NULL; |
const __le32 *new_fw_data = NULL; |
u32 running, blackout = 0; |
u32 running, blackout = 0, tmp; |
u32 *io_mc_regs = NULL; |
const __le32 *new_io_mc_regs = NULL; |
int i, regs_size, ucode_size; |
1866,6 → 1870,15 |
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); |
} |
} |
tmp = RREG32(MC_SEQ_MISC0); |
if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) { |
WREG32(MC_SEQ_IO_DEBUG_INDEX, 5); |
WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023); |
WREG32(MC_SEQ_IO_DEBUG_INDEX, 9); |
WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0); |
} |
/* load the MC ucode */ |
for (i = 0; i < ucode_size; i++) { |
if (rdev->new_fw) |
3419,6 → 3432,7 |
u32 disabled_rbs = 0; |
u32 enabled_rbs = 0; |
mutex_lock(&rdev->grbm_idx_mutex); |
for (i = 0; i < se_num; i++) { |
for (j = 0; j < sh_per_se; j++) { |
cik_select_se_sh(rdev, i, j); |
3430,6 → 3444,7 |
} |
} |
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
mutex_unlock(&rdev->grbm_idx_mutex); |
mask = 1; |
for (i = 0; i < max_rb_num_per_se * se_num; i++) { |
3440,6 → 3455,7 |
rdev->config.cik.backend_enable_mask = enabled_rbs; |
mutex_lock(&rdev->grbm_idx_mutex); |
for (i = 0; i < se_num; i++) { |
cik_select_se_sh(rdev, i, 0xffffffff); |
data = 0; |
3467,6 → 3483,7 |
WREG32(PA_SC_RASTER_CONFIG, data); |
} |
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
mutex_unlock(&rdev->grbm_idx_mutex); |
} |
/** |
3684,6 → 3701,12 |
/* set HW defaults for 3D engine */ |
WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); |
mutex_lock(&rdev->grbm_idx_mutex); |
/* |
* making sure that the following register writes will be broadcast |
* to all the shaders |
*/ |
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
WREG32(SX_DEBUG_1, 0x20); |
WREG32(TA_CNTL_AUX, 0x00010000); |
3739,6 → 3762,7 |
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); |
WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER); |
mutex_unlock(&rdev->grbm_idx_mutex); |
udelay(50); |
} |
3959,18 → 3983,19 |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* @resv: reservation object to sync to |
* |
* Copy GPU paging using the CP DMA engine (CIK+). |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int cik_copy_cpdma(struct radeon_device *rdev, |
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_fence *fence; |
struct radeon_sync sync; |
int ring_index = rdev->asic->copy.blit_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_bytes, cur_size_in_bytes, control; |
3977,11 → 4002,7 |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
radeon_sync_create(&sync); |
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); |
3988,12 → 4009,12 |
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
radeon_sync_resv(rdev, &sync, resv, false); |
radeon_sync_rings(rdev, &sync, ring->idx); |
for (i = 0; i < num_loops; i++) { |
cur_size_in_bytes = size_in_bytes; |
4014,17 → 4035,17 |
dst_offset += cur_size_in_bytes; |
} |
r = radeon_fence_emit(rdev, fence, ring->idx); |
r = radeon_fence_emit(rdev, &fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
radeon_sync_free(rdev, &sync, fence); |
return r; |
return fence; |
} |
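The cik_copy_cpdma() rework above is one instance of a migration that runs through this whole diff: radeon_semaphore is replaced by the radeon_sync API, and the copy functions now return a struct radeon_fence * (or an ERR_PTR) instead of taking a fence out-parameter. A condensed sketch of the new flow, using only the helpers visible in this hunk:

/* Condensed sketch of the sync/fence flow (illustrative only;
 * num_dw stands in for the ring space actually requested). */
struct radeon_fence *fence;
struct radeon_sync sync;
int r;

radeon_sync_create(&sync);
r = radeon_ring_lock(rdev, ring, num_dw);
if (r) {
        radeon_sync_free(rdev, &sync, NULL);
        return ERR_PTR(r);
}
radeon_sync_resv(rdev, &sync, resv, false);  /* collect fences from the reservation object */
radeon_sync_rings(rdev, &sync, ring->idx);   /* emit the waits on this ring */

/* ... emit the actual copy packets ... */

r = radeon_fence_emit(rdev, &fence, ring->idx);
if (r) {
        radeon_ring_unlock_undo(rdev, ring);
        radeon_sync_free(rdev, &sync, NULL);
        return ERR_PTR(r);
}
radeon_ring_unlock_commit(rdev, ring, false);
radeon_sync_free(rdev, &sync, fence);        /* release the sync object against the new fence */
return fence;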
/* |
4045,6 → 4066,7 |
void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; |
u32 header, control = INDIRECT_BUFFER_VALID; |
if (ib->is_const_ib) { |
4073,8 → 4095,7 |
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); |
} |
control |= ib->length_dw | |
(ib->vm ? (ib->vm->id << 24) : 0); |
control |= ib->length_dw | (vm_id << 24); |
radeon_ring_write(ring, header); |
radeon_ring_write(ring, |
4234,7 → 4255,7 |
WREG32(CP_PFP_UCODE_ADDR, 0); |
for (i = 0; i < fw_size; i++) |
WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); |
WREG32(CP_PFP_UCODE_ADDR, 0); |
WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version)); |
/* CE */ |
fw_data = (const __le32 *) |
4243,7 → 4264,7 |
WREG32(CP_CE_UCODE_ADDR, 0); |
for (i = 0; i < fw_size; i++) |
WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); |
WREG32(CP_CE_UCODE_ADDR, 0); |
WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version)); |
/* ME */ |
fw_data = (const __be32 *) |
4252,7 → 4273,8 |
WREG32(CP_ME_RAM_WADDR, 0); |
for (i = 0; i < fw_size; i++) |
WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++)); |
WREG32(CP_ME_RAM_WADDR, 0); |
WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version)); |
WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version)); |
} else { |
const __be32 *fw_data; |
4278,10 → 4300,6 |
WREG32(CP_ME_RAM_WADDR, 0); |
} |
WREG32(CP_PFP_UCODE_ADDR, 0); |
WREG32(CP_CE_UCODE_ADDR, 0); |
WREG32(CP_ME_RAM_WADDR, 0); |
WREG32(CP_ME_RAM_RADDR, 0); |
return 0; |
} |
4315,8 → 4333,8 |
/* init the CE partitions. CE only used for gfx on CIK */ |
radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); |
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); |
radeon_ring_write(ring, 0xc000); |
radeon_ring_write(ring, 0xc000); |
radeon_ring_write(ring, 0x8000); |
radeon_ring_write(ring, 0x8000); |
/* setup clear context state */ |
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); |
4563,7 → 4581,7 |
WREG32(CP_MEC_ME1_UCODE_ADDR, 0); |
for (i = 0; i < fw_size; i++) |
WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++)); |
WREG32(CP_MEC_ME1_UCODE_ADDR, 0); |
WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version)); |
/* MEC2 */ |
if (rdev->family == CHIP_KAVERI) { |
4577,7 → 4595,7 |
WREG32(CP_MEC_ME2_UCODE_ADDR, 0); |
for (i = 0; i < fw_size; i++) |
WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++)); |
WREG32(CP_MEC_ME2_UCODE_ADDR, 0); |
WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version)); |
} |
} else { |
const __be32 *fw_data; |
4677,12 → 4695,11 |
/* |
* KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total |
* CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total |
* Nonetheless, we assign only 1 pipe because all other pipes will |
* be handled by KFD |
*/ |
if (rdev->family == CHIP_KAVERI) |
rdev->mec.num_mec = 2; |
else |
rdev->mec.num_mec = 1; |
rdev->mec.num_pipe = 4; |
rdev->mec.num_pipe = 1; |
rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8; |
if (rdev->mec.hpd_eop_obj == NULL) { |
4689,7 → 4706,7 |
r = radeon_bo_create(rdev, |
rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2, |
PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_GTT, 0, NULL, |
RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, |
&rdev->mec.hpd_eop_obj); |
if (r) { |
dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r); |
4824,13 → 4841,10 |
/* init the pipes */ |
mutex_lock(&rdev->srbm_mutex); |
for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { |
int me = (i < 4) ? 1 : 2; |
int pipe = (i < 4) ? i : (i - 4); |
eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2); |
eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr; |
cik_srbm_select(rdev, me, pipe, 0, 0); |
cik_srbm_select(rdev, 0, 0, 0, 0); |
/* write the EOP addr */ |
WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8); |
4844,8 → 4858,7 |
tmp &= ~EOP_SIZE_MASK; |
tmp |= order_base_2(MEC_HPD_SIZE / 8); |
WREG32(CP_HPD_EOP_CONTROL, tmp); |
} |
cik_srbm_select(rdev, 0, 0, 0, 0); |
mutex_unlock(&rdev->srbm_mutex); |
/* init the queues. Just two for now. */ |
4860,7 → 4873,7 |
sizeof(struct bonaire_mqd), |
PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_GTT, 0, NULL, |
&rdev->ring[idx].mqd_obj); |
NULL, &rdev->ring[idx].mqd_obj); |
if (r) { |
dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r); |
return r; |
5899,8 → 5912,13 |
*/ |
int cik_vm_init(struct radeon_device *rdev) |
{ |
/* number of VMs */ |
rdev->vm_manager.nvm = 16; |
/* |
* number of VMs |
* VMID 0 is reserved for System |
* radeon graphics/compute will use VMIDs 1-7 |
* amdkfd will use VMIDs 8-15 |
*/ |
rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS; |
/* base offset of vram pages */ |
if (rdev->flags & RADEON_IS_IGP) { |
u64 tmp = RREG32(MC_VM_FB_OFFSET); |
5960,26 → 5978,23 |
* Update the page table base and flush the VM TLB |
* using the CP (CIK). |
*/ |
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX); |
int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX); |
if (vm == NULL) |
return; |
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
WRITE_DATA_DST_SEL(0))); |
if (vm->id < 8) { |
if (vm_id < 8) { |
radeon_ring_write(ring, |
(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); |
(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2); |
} else { |
radeon_ring_write(ring, |
(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); |
(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2); |
} |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, vm->pd_gpu_addr >> 12); |
radeon_ring_write(ring, pd_addr >> 12); |
/* update SH_MEM_* regs */ |
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
5987,7 → 6002,7 |
WRITE_DATA_DST_SEL(0))); |
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, VMID(vm->id)); |
radeon_ring_write(ring, VMID(vm_id)); |
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); |
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
6008,7 → 6023,7 |
radeon_ring_write(ring, VMID(0)); |
/* HDP flush */ |
cik_hdp_flush_cp_ring_emit(rdev, ridx); |
cik_hdp_flush_cp_ring_emit(rdev, ring->idx); |
/* bits 0-15 are the VM contexts0-15 */ |
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
6016,7 → 6031,7 |
WRITE_DATA_DST_SEL(0))); |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 1 << vm->id); |
radeon_ring_write(ring, 1 << vm_id); |
/* compute doesn't have PFP */ |
if (usepfp) { |
6061,6 → 6076,7 |
u32 i, j, k; |
u32 mask; |
mutex_lock(&rdev->grbm_idx_mutex); |
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { |
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { |
cik_select_se_sh(rdev, i, j); |
6072,6 → 6088,7 |
} |
} |
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
mutex_unlock(&rdev->grbm_idx_mutex); |
mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY; |
for (k = 0; k < rdev->usec_timeout; k++) { |
6206,10 → 6223,12 |
WREG32(RLC_LB_CNTR_INIT, 0); |
WREG32(RLC_LB_CNTR_MAX, 0x00008000); |
mutex_lock(&rdev->grbm_idx_mutex); |
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff); |
WREG32(RLC_LB_PARAMS, 0x00600408); |
WREG32(RLC_LB_CNTL, 0x80000004); |
mutex_unlock(&rdev->grbm_idx_mutex); |
WREG32(RLC_MC_CNTL, 0); |
WREG32(RLC_UCODE_CNTL, 0); |
6226,7 → 6245,7 |
WREG32(RLC_GPM_UCODE_ADDR, 0); |
for (i = 0; i < size; i++) |
WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); |
WREG32(RLC_GPM_UCODE_ADDR, 0); |
WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version)); |
} else { |
const __be32 *fw_data; |
6276,11 → 6295,13 |
tmp = cik_halt_rlc(rdev); |
mutex_lock(&rdev->grbm_idx_mutex); |
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); |
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); |
tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE; |
WREG32(RLC_SERDES_WR_CTRL, tmp2); |
mutex_unlock(&rdev->grbm_idx_mutex); |
cik_update_rlc(rdev, tmp); |
6316,6 → 6337,7 |
} |
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); |
data |= 0x00000001; |
data &= 0xfffffffd; |
if (orig != data) |
WREG32(RLC_CGTT_MGCG_OVERRIDE, data); |
6322,11 → 6344,13 |
tmp = cik_halt_rlc(rdev); |
mutex_lock(&rdev->grbm_idx_mutex); |
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); |
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); |
data = BPM_ADDR_MASK | MGCG_OVERRIDE_0; |
WREG32(RLC_SERDES_WR_CTRL, data); |
mutex_unlock(&rdev->grbm_idx_mutex); |
cik_update_rlc(rdev, tmp); |
6347,7 → 6371,7 |
} |
} else { |
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); |
data |= 0x00000002; |
data |= 0x00000003; |
if (orig != data) |
WREG32(RLC_CGTT_MGCG_OVERRIDE, data); |
6370,11 → 6394,13 |
tmp = cik_halt_rlc(rdev); |
mutex_lock(&rdev->grbm_idx_mutex); |
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); |
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); |
data = BPM_ADDR_MASK | MGCG_OVERRIDE_1; |
WREG32(RLC_SERDES_WR_CTRL, data); |
mutex_unlock(&rdev->grbm_idx_mutex); |
cik_update_rlc(rdev, tmp); |
} |
6803,10 → 6829,12 |
u32 mask = 0, tmp, tmp1; |
int i; |
mutex_lock(&rdev->grbm_idx_mutex); |
cik_select_se_sh(rdev, se, sh); |
tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG); |
tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG); |
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); |
mutex_unlock(&rdev->grbm_idx_mutex); |
tmp &= 0xffff0000; |
7290,8 → 7318,7 |
int cik_irq_set(struct radeon_device *rdev) |
{ |
u32 cp_int_cntl; |
u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3; |
u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3; |
u32 cp_m1p0; |
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; |
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; |
u32 grbm_int_cntl = 0; |
7325,13 → 7352,6 |
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; |
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
if (rdev->flags & RADEON_IS_IGP) |
thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & |
7353,37 → 7373,10 |
case 0: |
cp_m1p0 |= TIME_STAMP_INT_ENABLE; |
break; |
case 1: |
cp_m1p1 |= TIME_STAMP_INT_ENABLE; |
break; |
case 2: |
cp_m1p2 |= TIME_STAMP_INT_ENABLE; |
break; |
case 3: |
cp_m1p2 |= TIME_STAMP_INT_ENABLE; |
break; |
default: |
DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe); |
break; |
} |
} else if (ring->me == 2) { |
switch (ring->pipe) { |
case 0: |
cp_m2p0 |= TIME_STAMP_INT_ENABLE; |
break; |
case 1: |
cp_m2p1 |= TIME_STAMP_INT_ENABLE; |
break; |
case 2: |
cp_m2p2 |= TIME_STAMP_INT_ENABLE; |
break; |
case 3: |
cp_m2p2 |= TIME_STAMP_INT_ENABLE; |
break; |
default: |
DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe); |
break; |
} |
} else { |
DRM_DEBUG("si_irq_set: sw int cp1 invalid me %d\n", ring->me); |
} |
7396,37 → 7389,10 |
case 0: |
cp_m1p0 |= TIME_STAMP_INT_ENABLE; |
break; |
case 1: |
cp_m1p1 |= TIME_STAMP_INT_ENABLE; |
break; |
case 2: |
cp_m1p2 |= TIME_STAMP_INT_ENABLE; |
break; |
case 3: |
cp_m1p2 |= TIME_STAMP_INT_ENABLE; |
break; |
default: |
DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe); |
break; |
} |
} else if (ring->me == 2) { |
switch (ring->pipe) { |
case 0: |
cp_m2p0 |= TIME_STAMP_INT_ENABLE; |
break; |
case 1: |
cp_m2p1 |= TIME_STAMP_INT_ENABLE; |
break; |
case 2: |
cp_m2p2 |= TIME_STAMP_INT_ENABLE; |
break; |
case 3: |
cp_m2p2 |= TIME_STAMP_INT_ENABLE; |
break; |
default: |
DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe); |
break; |
} |
} else { |
DRM_DEBUG("si_irq_set: sw int cp2 invalid me %d\n", ring->me); |
} |
7511,13 → 7477,6 |
WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1); |
WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0); |
WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1); |
WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2); |
WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3); |
WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0); |
WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1); |
WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2); |
WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3); |
WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
7834,6 → 7793,10 |
while (rptr != wptr) { |
/* wptr/rptr are in bytes! */ |
ring_index = rptr / 4; |
// radeon_kfd_interrupt(rdev, |
// (const void *) &rdev->ih.ring[ring_index]); |
src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; |
src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; |
ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff; |
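The three masked reads above unpack one interrupt vector from the IH ring. A sketch of the layout, assuming the CIK IH format in which every vector occupies four dwords (which is why rptr advances in 16-byte steps in this handler); the struct is purely illustrative:

    /* Sketch: logical layout of one 4-dword CIK IH ring entry. */
    struct ih_vector {
        u32 src_id;    /* dword 0, bits 0-7  : interrupt source id  */
        u32 src_data;  /* dword 1, bits 0-27 : source-specific data */
        u32 ring_id;   /* dword 2, bits 0-7  : originating ring     */
        u32 reserved;  /* dword 3 */
    };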
8457,6 → 8420,10 |
return r; |
} |
// r = radeon_kfd_resume(rdev); |
// if (r) |
// return r; |
return 0; |
} |
9280,6 → 9247,9 |
u32 num_heads = 0, lb_size; |
int i; |
if (!rdev->mode_info.mode_config_initialized) |
return; |
radeon_update_display_priority(rdev); |
for (i = 0; i < rdev->num_crtc; i++) { |
/drivers/video/drm/radeon/cik_reg.h |
---|
147,4 → 147,140 |
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c |
#define CP_HQD_IQ_RPTR 0xC970u |
#define AQL_ENABLE (1U << 0) |
#define IDLE (1 << 2) |
struct cik_mqd { |
uint32_t header; |
uint32_t compute_dispatch_initiator; |
uint32_t compute_dim_x; |
uint32_t compute_dim_y; |
uint32_t compute_dim_z; |
uint32_t compute_start_x; |
uint32_t compute_start_y; |
uint32_t compute_start_z; |
uint32_t compute_num_thread_x; |
uint32_t compute_num_thread_y; |
uint32_t compute_num_thread_z; |
uint32_t compute_pipelinestat_enable; |
uint32_t compute_perfcount_enable; |
uint32_t compute_pgm_lo; |
uint32_t compute_pgm_hi; |
uint32_t compute_tba_lo; |
uint32_t compute_tba_hi; |
uint32_t compute_tma_lo; |
uint32_t compute_tma_hi; |
uint32_t compute_pgm_rsrc1; |
uint32_t compute_pgm_rsrc2; |
uint32_t compute_vmid; |
uint32_t compute_resource_limits; |
uint32_t compute_static_thread_mgmt_se0; |
uint32_t compute_static_thread_mgmt_se1; |
uint32_t compute_tmpring_size; |
uint32_t compute_static_thread_mgmt_se2; |
uint32_t compute_static_thread_mgmt_se3; |
uint32_t compute_restart_x; |
uint32_t compute_restart_y; |
uint32_t compute_restart_z; |
uint32_t compute_thread_trace_enable; |
uint32_t compute_misc_reserved; |
uint32_t compute_user_data_0; |
uint32_t compute_user_data_1; |
uint32_t compute_user_data_2; |
uint32_t compute_user_data_3; |
uint32_t compute_user_data_4; |
uint32_t compute_user_data_5; |
uint32_t compute_user_data_6; |
uint32_t compute_user_data_7; |
uint32_t compute_user_data_8; |
uint32_t compute_user_data_9; |
uint32_t compute_user_data_10; |
uint32_t compute_user_data_11; |
uint32_t compute_user_data_12; |
uint32_t compute_user_data_13; |
uint32_t compute_user_data_14; |
uint32_t compute_user_data_15; |
uint32_t cp_compute_csinvoc_count_lo; |
uint32_t cp_compute_csinvoc_count_hi; |
uint32_t cp_mqd_base_addr_lo; |
uint32_t cp_mqd_base_addr_hi; |
uint32_t cp_hqd_active; |
uint32_t cp_hqd_vmid; |
uint32_t cp_hqd_persistent_state; |
uint32_t cp_hqd_pipe_priority; |
uint32_t cp_hqd_queue_priority; |
uint32_t cp_hqd_quantum; |
uint32_t cp_hqd_pq_base_lo; |
uint32_t cp_hqd_pq_base_hi; |
uint32_t cp_hqd_pq_rptr; |
uint32_t cp_hqd_pq_rptr_report_addr_lo; |
uint32_t cp_hqd_pq_rptr_report_addr_hi; |
uint32_t cp_hqd_pq_wptr_poll_addr_lo; |
uint32_t cp_hqd_pq_wptr_poll_addr_hi; |
uint32_t cp_hqd_pq_doorbell_control; |
uint32_t cp_hqd_pq_wptr; |
uint32_t cp_hqd_pq_control; |
uint32_t cp_hqd_ib_base_addr_lo; |
uint32_t cp_hqd_ib_base_addr_hi; |
uint32_t cp_hqd_ib_rptr; |
uint32_t cp_hqd_ib_control; |
uint32_t cp_hqd_iq_timer; |
uint32_t cp_hqd_iq_rptr; |
uint32_t cp_hqd_dequeue_request; |
uint32_t cp_hqd_dma_offload; |
uint32_t cp_hqd_sema_cmd; |
uint32_t cp_hqd_msg_type; |
uint32_t cp_hqd_atomic0_preop_lo; |
uint32_t cp_hqd_atomic0_preop_hi; |
uint32_t cp_hqd_atomic1_preop_lo; |
uint32_t cp_hqd_atomic1_preop_hi; |
uint32_t cp_hqd_hq_status0; |
uint32_t cp_hqd_hq_control0; |
uint32_t cp_mqd_control; |
uint32_t cp_mqd_query_time_lo; |
uint32_t cp_mqd_query_time_hi; |
uint32_t cp_mqd_connect_start_time_lo; |
uint32_t cp_mqd_connect_start_time_hi; |
uint32_t cp_mqd_connect_end_time_lo; |
uint32_t cp_mqd_connect_end_time_hi; |
uint32_t cp_mqd_connect_end_wf_count; |
uint32_t cp_mqd_connect_end_pq_rptr; |
uint32_t cp_mqd_connect_end_pq_wptr; |
uint32_t cp_mqd_connect_end_ib_rptr; |
uint32_t reserved_96; |
uint32_t reserved_97; |
uint32_t reserved_98; |
uint32_t reserved_99; |
uint32_t iqtimer_pkt_header; |
uint32_t iqtimer_pkt_dw0; |
uint32_t iqtimer_pkt_dw1; |
uint32_t iqtimer_pkt_dw2; |
uint32_t iqtimer_pkt_dw3; |
uint32_t iqtimer_pkt_dw4; |
uint32_t iqtimer_pkt_dw5; |
uint32_t iqtimer_pkt_dw6; |
uint32_t reserved_108; |
uint32_t reserved_109; |
uint32_t reserved_110; |
uint32_t reserved_111; |
uint32_t queue_doorbell_id0; |
uint32_t queue_doorbell_id1; |
uint32_t queue_doorbell_id2; |
uint32_t queue_doorbell_id3; |
uint32_t queue_doorbell_id4; |
uint32_t queue_doorbell_id5; |
uint32_t queue_doorbell_id6; |
uint32_t queue_doorbell_id7; |
uint32_t queue_doorbell_id8; |
uint32_t queue_doorbell_id9; |
uint32_t queue_doorbell_id10; |
uint32_t queue_doorbell_id11; |
uint32_t queue_doorbell_id12; |
uint32_t queue_doorbell_id13; |
uint32_t queue_doorbell_id14; |
uint32_t queue_doorbell_id15; |
}; |
#endif |
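The cik_mqd structure added above mirrors the hardware Memory Queue Descriptor for a compute queue: the CP loads and saves the CP_HQD_* state (those register offsets appear in cikd.h later in this diff) from a memory copy laid out exactly like this, field for field from cp_mqd_base_addr_lo onward. A minimal sketch of pointing the hardware at a prepared MQD, assuming a GTT-resident buffer at mqd_gpu_addr and that the target pipe/queue has already been selected; the register names here are my assumption from the CIK programming model, not something this header guarantees:

    /* Sketch: attach an initialized MQD to the selected HQD and start it. */
    WREG32(CP_MQD_BASE_ADDR, mqd_gpu_addr & 0xfffffffc);
    WREG32(CP_MQD_BASE_ADDR_HI, upper_32_bits(mqd_gpu_addr));
    WREG32(CP_HQD_ACTIVE, 1);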
/drivers/video/drm/radeon/cik_sdma.c |
---|
134,7 → 134,7 |
struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf; |
u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf; |
if (rdev->wb.enabled) { |
u32 next_rptr = ring->wptr + 5; |
530,18 → 530,19 |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* @resv: reservation object to sync to |
* |
* Copy GPU paging using the DMA engine (CIK). |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int cik_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_fence *fence; |
struct radeon_sync sync; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_bytes, cur_size_in_bytes; |
548,11 → 549,7 |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
radeon_sync_create(&sync); |
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); |
559,12 → 556,12 |
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
radeon_sync_resv(rdev, &sync, resv, false); |
radeon_sync_rings(rdev, &sync, ring->idx); |
for (i = 0; i < num_loops; i++) { |
cur_size_in_bytes = size_in_bytes; |
582,17 → 579,17 |
dst_offset += cur_size_in_bytes; |
} |
r = radeon_fence_emit(rdev, fence, ring->idx); |
r = radeon_fence_emit(rdev, &fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
radeon_sync_free(rdev, &sync, fence); |
return r; |
return fence; |
} |
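This rewrite of cik_copy_dma is the recurring API change in this update: copy callbacks no longer fill a struct radeon_fence ** out-parameter and hand-roll semaphore syncing; they sync against the buffer's reservation_object via the new radeon_sync helpers and return the fence (or an ERR_PTR on failure). A hedged sketch of the new calling convention, with illustrative variable names:

    /* Sketch: using a new-style copy callback. 'resv' is the reservation
     * object of the BO being moved. */
    struct radeon_fence *fence;

    fence = cik_copy_dma(rdev, src_gpu_addr, dst_gpu_addr, num_pages, resv);
    if (IS_ERR(fence))
        return PTR_ERR(fence);  /* ring lock or fence emit failed */
    r = radeon_fence_wait(fence, false);
    radeon_fence_unref(&fence);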
/** |
666,17 → 663,20 |
{ |
struct radeon_ib ib; |
unsigned i; |
unsigned index; |
int r; |
void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
u32 tmp = 0; |
u64 gpu_addr; |
if (!ptr) { |
DRM_ERROR("invalid vram scratch pointer\n"); |
return -EINVAL; |
} |
if (ring->idx == R600_RING_TYPE_DMA_INDEX) |
index = R600_WB_DMA_RING_TEST_OFFSET; |
else |
index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; |
gpu_addr = rdev->wb.gpu_addr + index; |
tmp = 0xCAFEDEAD; |
writel(tmp, ptr); |
rdev->wb.wb[index/4] = cpu_to_le32(tmp); |
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); |
if (r) { |
685,8 → 685,8 |
} |
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); |
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; |
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr); |
ib.ptr[1] = lower_32_bits(gpu_addr); |
ib.ptr[2] = upper_32_bits(gpu_addr); |
ib.ptr[3] = 1; |
ib.ptr[4] = 0xDEADBEEF; |
ib.length_dw = 5; |
703,7 → 703,7 |
return r; |
} |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = readl(ptr); |
tmp = le32_to_cpu(rdev->wb.wb[index/4]); |
if (tmp == 0xDEADBEEF) |
break; |
DRM_UDELAY(1); |
900,25 → 900,21 |
* Update the page table base and flush the VM TLB |
* using sDMA (CIK). |
*/ |
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
if (vm == NULL) |
return; |
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); |
if (vm->id < 8) { |
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); |
if (vm_id < 8) { |
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2); |
} else { |
radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); |
radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2); |
} |
radeon_ring_write(ring, vm->pd_gpu_addr >> 12); |
radeon_ring_write(ring, pd_addr >> 12); |
/* update SH_MEM_* regs */ |
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); |
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); |
radeon_ring_write(ring, VMID(vm->id)); |
radeon_ring_write(ring, VMID(vm_id)); |
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); |
radeon_ring_write(ring, SH_MEM_BASES >> 2); |
941,11 → 937,11 |
radeon_ring_write(ring, VMID(0)); |
/* flush HDP */ |
cik_sdma_hdp_flush_ring_emit(rdev, ridx); |
cik_sdma_hdp_flush_ring_emit(rdev, ring->idx); |
/* flush TLB */ |
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
radeon_ring_write(ring, 1 << vm->id); |
radeon_ring_write(ring, 1 << vm_id); |
} |
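As with the CP flush earlier in this diff, the SDMA flush hook now takes the ring plus an explicit vm_id and page-directory address rather than a struct radeon_vm, so the NULL-vm early-out disappears and the caller resolves the per-ring VMID. A sketch of the caller side, using field names as I recall them from the 3.19 radeon VM code; treat it as illustrative:

    /* Sketch: the generic layer picks the per-ring id, then calls the hook. */
    struct radeon_vm_id *vm_id = &vm->ids[ring->idx];

    radeon_ring_vm_flush(rdev, ring, vm_id->id, vm_id->pd_gpu_addr);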
/drivers/video/drm/radeon/cikd.h |
---|
30,6 → 30,8 |
#define CIK_RB_BITMAP_WIDTH_PER_SH 2 |
#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4 |
#define RADEON_NUM_OF_VMIDS 8 |
/* DIDT IND registers */ |
#define DIDT_SQ_CTRL0 0x0 |
# define DIDT_CTRL_EN (1 << 0) |
184,7 → 186,10 |
#define DIG_THERM_DPM(x) ((x) << 14) |
#define DIG_THERM_DPM_MASK 0x003FC000 |
#define DIG_THERM_DPM_SHIFT 14 |
#define CG_THERMAL_STATUS 0xC0300008 |
#define FDO_PWM_DUTY(x) ((x) << 9) |
#define FDO_PWM_DUTY_MASK (0xff << 9) |
#define FDO_PWM_DUTY_SHIFT 9 |
#define CG_THERMAL_INT 0xC030000C |
#define CI_DIG_THERM_INTH(x) ((x) << 8) |
#define CI_DIG_THERM_INTH_MASK 0x0000FF00 |
194,7 → 199,10 |
#define CI_DIG_THERM_INTL_SHIFT 16 |
#define THERM_INT_MASK_HIGH (1 << 24) |
#define THERM_INT_MASK_LOW (1 << 25) |
#define CG_MULT_THERMAL_CTRL 0xC0300010 |
#define TEMP_SEL(x) ((x) << 20) |
#define TEMP_SEL_MASK (0xff << 20) |
#define TEMP_SEL_SHIFT 20 |
#define CG_MULT_THERMAL_STATUS 0xC0300014 |
#define ASIC_MAX_TEMP(x) ((x) << 0) |
#define ASIC_MAX_TEMP_MASK 0x000001ff |
203,6 → 211,36 |
#define CTF_TEMP_MASK 0x0003fe00 |
#define CTF_TEMP_SHIFT 9 |
#define CG_FDO_CTRL0 0xC0300064 |
#define FDO_STATIC_DUTY(x) ((x) << 0) |
#define FDO_STATIC_DUTY_MASK 0x000000FF |
#define FDO_STATIC_DUTY_SHIFT 0 |
#define CG_FDO_CTRL1 0xC0300068 |
#define FMAX_DUTY100(x) ((x) << 0) |
#define FMAX_DUTY100_MASK 0x000000FF |
#define FMAX_DUTY100_SHIFT 0 |
#define CG_FDO_CTRL2 0xC030006C |
#define TMIN(x) ((x) << 0) |
#define TMIN_MASK 0x000000FF |
#define TMIN_SHIFT 0 |
#define FDO_PWM_MODE(x) ((x) << 11) |
#define FDO_PWM_MODE_MASK (7 << 11) |
#define FDO_PWM_MODE_SHIFT 11 |
#define TACH_PWM_RESP_RATE(x) ((x) << 25) |
#define TACH_PWM_RESP_RATE_MASK (0x7f << 25) |
#define TACH_PWM_RESP_RATE_SHIFT 25 |
#define CG_TACH_CTRL 0xC0300070 |
# define EDGE_PER_REV(x) ((x) << 0) |
# define EDGE_PER_REV_MASK (0x7 << 0) |
# define EDGE_PER_REV_SHIFT 0 |
# define TARGET_PERIOD(x) ((x) << 3) |
# define TARGET_PERIOD_MASK 0xfffffff8 |
# define TARGET_PERIOD_SHIFT 3 |
#define CG_TACH_STATUS 0xC0300074 |
# define TACH_PERIOD(x) ((x) << 0) |
# define TACH_PERIOD_MASK 0xffffffff |
# define TACH_PERIOD_SHIFT 0 |
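These tach registers enable fan-speed readback: EDGE_PER_REV is the tachometer pulses per fan revolution and TACH_PERIOD the measured pulse period in reference-clock units. A sketch of converting that to RPM, modeled on the CI dpm code; the 60 * xclk * 10000 scaling is an assumption carried over from that code (xclk being in 10 kHz units), not something this header defines:

    /* Sketch: fan RPM from the measured tach period. */
    u32 xclk = radeon_get_xclk(rdev);
    u32 tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >>
                      TACH_PERIOD_SHIFT;

    if (tach_period == 0)
        return -ENOENT;     /* no tach signal / fan stopped */
    *speed = 60 * xclk * 10000 / tach_period;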
#define CG_ECLK_CNTL 0xC05000AC |
# define ECLK_DIVIDER_MASK 0x7f |
# define ECLK_DIR_CNTL_EN (1 << 8) |
1137,6 → 1175,9 |
#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3 |
#define DEFAULT_MTYPE(x) ((x) << 4) |
#define APE1_MTYPE(x) ((x) << 7) |
/* valid for both DEFAULT_MTYPE and APE1_MTYPE */ |
#define MTYPE_CACHED 0 |
#define MTYPE_NONCACHED 3 |
#define SX_DEBUG_1 0x9060 |
1447,6 → 1488,16 |
#define CP_HQD_ACTIVE 0xC91C |
#define CP_HQD_VMID 0xC920 |
#define CP_HQD_PERSISTENT_STATE 0xC924u |
#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8) |
#define CP_HQD_PIPE_PRIORITY 0xC928u |
#define CP_HQD_QUEUE_PRIORITY 0xC92Cu |
#define CP_HQD_QUANTUM 0xC930u |
#define QUANTUM_EN 1U |
#define QUANTUM_SCALE_1MS (1U << 4) |
#define QUANTUM_DURATION(x) ((x) << 8) |
#define CP_HQD_PQ_BASE 0xC934 |
#define CP_HQD_PQ_BASE_HI 0xC938 |
#define CP_HQD_PQ_RPTR 0xC93C |
1474,12 → 1525,32 |
#define PRIV_STATE (1 << 30) |
#define KMD_QUEUE (1 << 31) |
#define CP_HQD_IB_BASE_ADDR 0xC95Cu |
#define CP_HQD_IB_BASE_ADDR_HI 0xC960u |
#define CP_HQD_IB_RPTR 0xC964u |
#define CP_HQD_IB_CONTROL 0xC968u |
#define IB_ATC_EN (1U << 23) |
#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20) |
#define CP_HQD_DEQUEUE_REQUEST 0xC974 |
#define DEQUEUE_REQUEST_DRAIN 1 |
#define DEQUEUE_REQUEST_RESET 2 |
#define CP_MQD_CONTROL 0xC99C |
#define MQD_VMID(x) ((x) << 0) |
#define MQD_VMID_MASK (0xf << 0) |
#define CP_HQD_SEMA_CMD 0xC97Cu |
#define CP_HQD_MSG_TYPE 0xC980u |
#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u |
#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u |
#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu |
#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u |
#define CP_HQD_HQ_SCHEDULER0 0xC994u |
#define CP_HQD_HQ_SCHEDULER1 0xC998u |
#define SH_STATIC_MEM_CONFIG 0x9604u |
#define DB_RENDER_CONTROL 0x28000 |
#define PA_SC_RASTER_CONFIG 0x28350 |
2069,4 → 2140,20 |
#define VCE_CMD_IB_AUTO 0x00000005 |
#define VCE_CMD_SEMAPHORE 0x00000006 |
#define ATC_VMID0_PASID_MAPPING 0x339Cu |
#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u |
#define ATC_VMID_PASID_MAPPING_VALID (1U << 31) |
#define ATC_VM_APERTURE0_CNTL 0x3310u |
#define ATS_ACCESS_MODE_NEVER 0 |
#define ATS_ACCESS_MODE_ALWAYS 1 |
#define ATC_VM_APERTURE0_CNTL2 0x3318u |
#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u |
#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u |
#define ATC_VM_APERTURE1_CNTL 0x3314u |
#define ATC_VM_APERTURE1_CNTL2 0x331Cu |
#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu |
#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u |
#endif |
/drivers/video/drm/radeon/cmdline.c |
---|
1,7 → 1,5 |
#include <drm/drmP.h> |
#include <drm.h> |
#include <drm_mm.h> |
#include <drm/radeon_drm.h> |
#include "radeon.h" |
#include "radeon_object.h" |
/drivers/video/drm/radeon/cypress_dpm.c |
---|
24,6 → 24,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "evergreend.h" |
#include "r600_dpm.h" |
#include "cypress_dpm.h" |
/drivers/video/drm/radeon/dce3_1_afmt.c |
---|
32,7 → 32,7 |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
u32 tmp; |
u8 *sadb; |
u8 *sadb = NULL; |
int sad_count; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
165,7 → 165,7 |
/* disable audio prior to setting up hw */ |
dig->afmt->pin = r600_audio_get_pin(rdev); |
r600_audio_enable(rdev, dig->afmt->pin, false); |
r600_audio_enable(rdev, dig->afmt->pin, 0); |
r600_audio_set_dto(encoder, mode->clock); |
240,5 → 240,5 |
r600_hdmi_audio_workaround(encoder); |
/* enable audio after setting up hw */ |
r600_audio_enable(rdev, dig->afmt->pin, true); |
r600_audio_enable(rdev, dig->afmt->pin, 0xf); |
} |
/drivers/video/drm/radeon/dce6_afmt.c |
---|
155,7 → 155,7 |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
u32 offset, tmp; |
u8 *sadb; |
u8 *sadb = NULL; |
int sad_count; |
if (!dig || !dig->afmt || !dig->afmt->pin) |
284,13 → 284,13 |
void dce6_audio_enable(struct radeon_device *rdev, |
struct r600_audio_pin *pin, |
bool enable) |
u8 enable_mask) |
{ |
if (!pin) |
return; |
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL, |
enable ? AUDIO_ENABLED : 0); |
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, |
enable_mask ? AUDIO_ENABLED : 0); |
} |
static const u32 pin_offsets[7] = |
/drivers/video/drm/radeon/evergreen.c |
---|
22,7 → 22,6 |
* Authors: Alex Deucher |
*/ |
#include <linux/firmware.h> |
//#include <linux/platform_device.h> |
#include <linux/slab.h> |
#include <drm/drmP.h> |
#include "radeon.h" |
2346,6 → 2345,9 |
u32 num_heads = 0, lb_size; |
int i; |
if (!rdev->mode_info.mode_config_initialized) |
return; |
radeon_update_display_priority(rdev); |
for (i = 0; i < rdev->num_crtc; i++) { |
2553,6 → 2555,7 |
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); |
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; |
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); |
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); |
} |
} else { |
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); |
3006,7 → 3009,7 |
u32 vgt_cache_invalidation; |
u32 hdp_host_path_cntl, tmp; |
u32 disabled_rb_mask; |
int i, j, num_shader_engines, ps_thread_count; |
int i, j, ps_thread_count; |
switch (rdev->family) { |
case CHIP_CYPRESS: |
3304,8 → 3307,6 |
rdev->config.evergreen.tile_config |= |
((gb_addr_config & 0x30000000) >> 28) << 12; |
num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1; |
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) { |
u32 efuse_straps_4; |
u32 efuse_straps_3; |
4023,7 → 4024,7 |
if (rdev->rlc.save_restore_obj == NULL) { |
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, |
&rdev->rlc.save_restore_obj); |
NULL, &rdev->rlc.save_restore_obj); |
if (r) { |
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); |
return r; |
4102,7 → 4103,7 |
if (rdev->rlc.clear_state_obj == NULL) { |
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, |
&rdev->rlc.clear_state_obj); |
NULL, &rdev->rlc.clear_state_obj); |
if (r) { |
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); |
sumo_rlc_fini(rdev); |
4179,7 → 4180,7 |
r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, |
PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, |
&rdev->rlc.cp_table_obj); |
NULL, &rdev->rlc.cp_table_obj); |
if (r) { |
dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r); |
sumo_rlc_fini(rdev); |
5135,9 → 5136,9 |
/* wptr/rptr are in bytes! */ |
rptr += 16; |
rptr &= rdev->ih.ptr_mask; |
WREG32(IH_RB_RPTR, rptr); |
} |
rdev->ih.rptr = rptr; |
WREG32(IH_RB_RPTR, rdev->ih.rptr); |
atomic_set(&rdev->ih.lock, 0); |
/* make sure wptr hasn't changed while processing */ |
/drivers/video/drm/radeon/evergreen_cs.c |
---|
35,7 → 35,7 |
#define MIN(a,b) (((a)<(b))?(a):(b)) |
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, |
struct radeon_cs_reloc **cs_reloc); |
struct radeon_bo_list **cs_reloc); |
struct evergreen_cs_track { |
u32 group_size; |
u32 nbanks; |
1094,7 → 1094,7 |
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) |
{ |
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
u32 last_reg; |
u32 m, i, tmp, *ib; |
int r; |
1792,7 → 1792,7 |
static int evergreen_packet3_check(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt) |
{ |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
struct evergreen_cs_track *track; |
volatile u32 *ib; |
unsigned idx; |
2661,7 → 2661,7 |
p->track = NULL; |
return r; |
} |
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
} while (p->idx < p->chunk_ib->length_dw); |
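The loop condition change above reflects another rename that runs through this update: struct radeon_cs_parser now stores direct chunk pointers (chunk_ib, chunk_relocs) instead of indices into the chunks array, and relocation entries become struct radeon_bo_list. Side by side, as seen throughout these files:

    /* Before (v3.17): index, -1 when the chunk is absent */
    ib_chunk = &p->chunks[p->chunk_ib_idx];
    if (p->chunk_relocs_idx == -1) { /* no relocation chunk */ }

    /* After (v3.19): pointer, NULL when absent */
    ib_chunk = p->chunk_ib;
    if (p->chunk_relocs == NULL) { /* no relocation chunk */ }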
#if 0 |
for (r = 0; r < p->ib.length_dw; r++) { |
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); |
2684,8 → 2684,8 |
**/ |
int evergreen_dma_cs_parse(struct radeon_cs_parser *p) |
{ |
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; |
struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc; |
struct radeon_cs_chunk *ib_chunk = p->chunk_ib; |
struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc; |
u32 header, cmd, count, sub_cmd; |
volatile u32 *ib = p->ib.ptr; |
u32 idx; |
3100,7 → 3100,7 |
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); |
return -EINVAL; |
} |
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
} while (p->idx < p->chunk_ib->length_dw); |
#if 0 |
for (r = 0; r < p->ib->length_dw; r++) { |
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); |
/drivers/video/drm/radeon/evergreen_dma.c |
---|
104,12 → 104,14 |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int evergreen_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_fence *fence; |
struct radeon_sync sync; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_dw, cur_size_in_dw; |
116,11 → 118,7 |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
radeon_sync_create(&sync); |
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; |
num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff); |
127,12 → 125,12 |
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
radeon_sync_resv(rdev, &sync, resv, false); |
radeon_sync_rings(rdev, &sync, ring->idx); |
for (i = 0; i < num_loops; i++) { |
cur_size_in_dw = size_in_dw; |
148,17 → 146,17 |
dst_offset += cur_size_in_dw * 4; |
} |
r = radeon_fence_emit(rdev, fence, ring->idx); |
r = radeon_fence_emit(rdev, &fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
radeon_sync_free(rdev, &sync, fence); |
return r; |
return fence; |
} |
/** |
/drivers/video/drm/radeon/evergreen_hdmi.c |
---|
38,6 → 38,37 |
extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, |
struct drm_display_mode *mode); |
/* enable the audio stream */ |
static void dce4_audio_enable(struct radeon_device *rdev, |
struct r600_audio_pin *pin, |
u8 enable_mask) |
{ |
u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL); |
if (!pin) |
return; |
if (enable_mask) { |
tmp |= AUDIO_ENABLED; |
if (enable_mask & 1) |
tmp |= PIN0_AUDIO_ENABLED; |
if (enable_mask & 2) |
tmp |= PIN1_AUDIO_ENABLED; |
if (enable_mask & 4) |
tmp |= PIN2_AUDIO_ENABLED; |
if (enable_mask & 8) |
tmp |= PIN3_AUDIO_ENABLED; |
} else { |
tmp &= ~(AUDIO_ENABLED | |
PIN0_AUDIO_ENABLED | |
PIN1_AUDIO_ENABLED | |
PIN2_AUDIO_ENABLED | |
PIN3_AUDIO_ENABLED); |
} |
WREG32(AZ_HOT_PLUG_CONTROL, tmp); |
} |
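dce4_audio_enable replaces the old bool with a per-pin enable mask: bits 0-3 gate PIN0-PIN3 individually, and a zero mask also clears the global AUDIO_ENABLED bit. That is what lets the mode-set path below mute first and enable afterwards, summarized here:

    dce4_audio_enable(rdev, dig->afmt->pin, 0);    /* mute before hw setup */
    /* ... program DTO, ACR, infoframes ... */
    dce4_audio_enable(rdev, dig->afmt->pin, 0xf);  /* enable all four pins */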
/* |
* update the N and CTS parameters for a given pixel clock rate |
*/ |
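Background for this step: HDMI sinks regenerate the audio clock from the N/CTS pair, related by 128 * fs = f_TMDS * N / CTS. A worked sketch with the spec's recommended N for 48 kHz; the numbers are assumptions for illustration, not values taken from this driver:

    /* Sketch: CTS = f_tmds * N / (128 * fs), here for 48 kHz audio on a
     * 25.2 MHz TMDS clock. */
    u32 n = 6144;               /* HDMI-recommended N for 48 kHz */
    u32 fs = 48000;
    u64 f_tmds = 25200000ULL;
    u32 cts = div_u64(f_tmds * n, 128 * fs);    /* = 25200 */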
102,7 → 133,7 |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
u32 tmp; |
u8 *sadb; |
u8 *sadb = NULL; |
int sad_count; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
318,10 → 349,10 |
/* disable audio prior to setting up hw */ |
if (ASIC_IS_DCE6(rdev)) { |
dig->afmt->pin = dce6_audio_get_pin(rdev); |
dce6_audio_enable(rdev, dig->afmt->pin, false); |
dce6_audio_enable(rdev, dig->afmt->pin, 0); |
} else { |
dig->afmt->pin = r600_audio_get_pin(rdev); |
r600_audio_enable(rdev, dig->afmt->pin, false); |
dce4_audio_enable(rdev, dig->afmt->pin, 0); |
} |
evergreen_audio_set_dto(encoder, mode->clock); |
463,13 → 494,15 |
/* enable audio after setting up hw */ |
if (ASIC_IS_DCE6(rdev)) |
dce6_audio_enable(rdev, dig->afmt->pin, true); |
dce6_audio_enable(rdev, dig->afmt->pin, 1); |
else |
r600_audio_enable(rdev, dig->afmt->pin, true); |
dce4_audio_enable(rdev, dig->afmt->pin, 0xf); |
} |
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
482,6 → 515,14 |
if (!enable && !dig->afmt->enabled) |
return; |
if (!enable && dig->afmt->pin) { |
if (ASIC_IS_DCE6(rdev)) |
dce6_audio_enable(rdev, dig->afmt->pin, 0); |
else |
dce4_audio_enable(rdev, dig->afmt->pin, 0); |
dig->afmt->pin = NULL; |
} |
dig->afmt->enabled = enable; |
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n", |
/drivers/video/drm/radeon/kv_dpm.c |
---|
2800,6 → 2800,8 |
tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> |
SMU_VOLTAGE_CURRENT_LEVEL_SHIFT; |
vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp); |
seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); |
seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); |
seq_printf(m, "power level %d sclk: %u vddc: %u\n", |
current_index, sclk, vddc); |
} |
/drivers/video/drm/radeon/main.c |
---|
29,7 → 29,7 |
videomode_t usermode; |
void cpu_detect(); |
void cpu_detect1(); |
int _stdcall display_handler(ioctl_t *io); |
static char log[256]; |
117,7 → 117,7 |
asm volatile ("int $0x40"::"a"(-1)); |
} |
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline) |
u32 __attribute__((externally_visible)) drvEntry(int action, char *cmdline) |
{ |
struct radeon_device *rdev = NULL; |
134,7 → 134,7 |
if( GetService("DISPLAY") != 0 ) |
return 0; |
printf("Radeon v3.17-rc5 cmdline %s\n", cmdline); |
printf("Radeon v3.19-rc1 cmdline %s\n", cmdline); |
if( cmdline && *cmdline ) |
parse_cmdline(cmdline, &usermode, log, &radeon_modeset); |
145,7 → 145,7 |
return 0; |
} |
cpu_detect(); |
cpu_detect1(); |
err = enum_pci_devices(); |
if( unlikely(err != 0) ) |
217,8 → 217,8 |
int _stdcall display_handler(ioctl_t *io) |
{ |
int retval = -1; |
u32_t *inp; |
u32_t *outp; |
u32 *inp; |
u32 *outp; |
inp = io->input; |
outp = io->output; |
273,10 → 273,10 |
#define PCI_CLASS_REVISION 0x08 |
#define PCI_CLASS_DISPLAY_VGA 0x0300 |
int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn) |
int pci_scan_filter(u32 id, u32 busnr, u32 devfn) |
{ |
u16_t vendor, device; |
u32_t class; |
u16 vendor, device; |
u32 class; |
int ret = 0; |
vendor = id & 0xffff; |
/drivers/video/drm/radeon/ni.c |
---|
1366,6 → 1366,7 |
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; |
u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA | |
PACKET3_SH_ACTION_ENA; |
1388,8 → 1389,7 |
#endif |
(ib->gpu_addr & 0xFFFFFFFC)); |
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); |
radeon_ring_write(ring, ib->length_dw | |
(ib->vm ? (ib->vm->id << 24) : 0)); |
radeon_ring_write(ring, ib->length_dw | (vm_id << 24)); |
/* flush read cache over gart for this vmid */ |
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
1396,7 → 1396,7 |
radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl); |
radeon_ring_write(ring, 0xFFFFFFFF); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */ |
radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */ |
} |
static void cayman_cp_enable(struct radeon_device *rdev, bool enable) |
2395,16 → 2395,12 |
* Update the page table base and flush the VM TLB |
* using the CP (cayman-si). |
*/ |
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0)); |
radeon_ring_write(ring, pd_addr >> 12); |
if (vm == NULL) |
return; |
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0)); |
radeon_ring_write(ring, vm->pd_gpu_addr >> 12); |
/* flush hdp cache */ |
radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0)); |
radeon_ring_write(ring, 0x1); |
2411,7 → 2407,7 |
/* bits 0-7 are the VM contexts0-7 */ |
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); |
radeon_ring_write(ring, 1 << vm->id); |
radeon_ring_write(ring, 1 << vm_id); |
/* sync PFP to ME, otherwise we might get invalid PFP reads */ |
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
/drivers/video/drm/radeon/ni_dma.c |
---|
123,6 → 123,7 |
struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; |
if (rdev->wb.enabled) { |
u32 next_rptr = ring->wptr + 4; |
140,7 → 141,7 |
*/ |
while ((ring->wptr & 7) != 5) |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0)); |
radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0)); |
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); |
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); |
446,16 → 447,12 |
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); |
} |
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
if (vm == NULL) |
return; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); |
radeon_ring_write(ring, vm->pd_gpu_addr >> 12); |
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2)); |
radeon_ring_write(ring, pd_addr >> 12); |
/* flush hdp cache */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
465,6 → 462,6 |
/* bits 0-7 are the VM contexts0-7 */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); |
radeon_ring_write(ring, 1 << vm->id); |
radeon_ring_write(ring, 1 << vm_id); |
} |
/drivers/video/drm/radeon/ni_dpm.c |
---|
23,6 → 23,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "nid.h" |
#include "r600_dpm.h" |
#include "ni_dpm.h" |
789,7 → 790,6 |
bool disable_mclk_switching; |
u32 mclk; |
u16 vddci; |
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; |
int i; |
if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
816,29 → 816,6 |
} |
} |
/* limit clocks to max supported clocks based on voltage dependency tables */ |
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, |
&max_sclk_vddc); |
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk, |
&max_mclk_vddci); |
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk, |
&max_mclk_vddc); |
for (i = 0; i < ps->performance_level_count; i++) { |
if (max_sclk_vddc) { |
if (ps->performance_levels[i].sclk > max_sclk_vddc) |
ps->performance_levels[i].sclk = max_sclk_vddc; |
} |
if (max_mclk_vddci) { |
if (ps->performance_levels[i].mclk > max_mclk_vddci) |
ps->performance_levels[i].mclk = max_mclk_vddci; |
} |
if (max_mclk_vddc) { |
if (ps->performance_levels[i].mclk > max_mclk_vddc) |
ps->performance_levels[i].mclk = max_mclk_vddc; |
} |
} |
/* XXX validate the min clocks required for display */ |
/* adjust low state */ |
/drivers/video/drm/radeon/pci.c |
---|
1,21 → 1,13 |
#include <linux/kernel.h> |
#include <linux/types.h> |
#include <linux/export.h> |
#include <linux/mutex.h> |
#include <linux/mod_devicetable.h> |
#include <errno-base.h> |
#include <pci.h> |
#include <linux/slab.h> |
#include <linux/pci.h> |
#include <syscall.h> |
static inline __attribute__((const)) |
bool is_power_of_2(unsigned long n) |
{ |
return (n != 0 && ((n & (n - 1)) == 0)); |
} |
extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn); |
extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn); |
static LIST_HEAD(devices); |
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */ |
39,9 → 31,9 |
} |
static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask) |
static u32 pci_size(u32 base, u32 maxbase, u32 mask) |
{ |
u32_t size = mask & maxbase; /* Find the significant bits */ |
u32 size = mask & maxbase; /* Find the significant bits */ |
if (!size) |
return 0; |
58,9 → 50,9 |
return size; |
} |
static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask) |
static u64 pci_size64(u64 base, u64 maxbase, u64 mask) |
{ |
u64_t size = mask & maxbase; /* Find the significant bits */ |
u64 size = mask & maxbase; /* Find the significant bits */ |
if (!size) |
return 0; |
77,7 → 69,7 |
return size; |
} |
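pci_size implements the standard BAR sizing probe: the caller writes all-ones to the BAR, reads back the decode mask, and this helper isolates the lowest set bit to get the region granularity, returning size - 1 (assuming the usual Linux pci_size body in the lines this hunk elides, which also rejects inconsistent readbacks). A worked example with assumed values:

    /* Sketch: a 1 MiB memory BAR reads back 0xFFF00008 after writing ~0. */
    u32 l  = 0xFE000008;    /* original BAR contents */
    u32 sz = 0xFFF00008;    /* readback after the ~0 write */
    sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
    /* mask & maxbase   = 0xFFF00000
     * lowest set bit   = 0x00100000  (1 MiB granularity)
     * returned value   = 0x000FFFFF  (size - 1) */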
static inline int is_64bit_memory(u32_t mask) |
static inline int is_64bit_memory(u32 mask) |
{ |
if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == |
(PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) |
87,15 → 79,15 |
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) |
{ |
u32_t pos, reg, next; |
u32_t l, sz; |
u32 pos, reg, next; |
u32 l, sz; |
struct resource *res; |
for(pos=0; pos < howmany; pos = next) |
{ |
u64_t l64; |
u64_t sz64; |
u32_t raw_sz; |
u64 l64; |
u64 sz64; |
u32 raw_sz; |
next = pos + 1; |
117,7 → 109,7 |
if ((l & PCI_BASE_ADDRESS_SPACE) == |
PCI_BASE_ADDRESS_SPACE_MEMORY) |
{ |
sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); |
/* |
* For 64bit prefetchable memory sz could be 0, if the |
* real size is bigger than 4G, so we need to check |
139,14 → 131,14 |
res->flags |= pci_calc_resource_flags(l); |
if (is_64bit_memory(l)) |
{ |
u32_t szhi, lhi; |
u32 szhi, lhi; |
lhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
PciWrite32(dev->busnr, dev->devfn, reg+4, ~0); |
szhi = PciRead32(dev->busnr, dev->devfn, reg+4); |
PciWrite32(dev->busnr, dev->devfn, reg+4, lhi); |
sz64 = ((u64_t)szhi << 32) | raw_sz; |
l64 = ((u64_t)lhi << 32) | l; |
sz64 = ((u64)szhi << 32) | raw_sz; |
l64 = ((u64)lhi << 32) | l; |
sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); |
next++; |
170,7 → 162,7 |
{ |
/* 64-bit wide address, treat as disabled */ |
PciWrite32(dev->busnr, dev->devfn, reg, |
l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK); |
l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); |
PciWrite32(dev->busnr, dev->devfn, reg+4, 0); |
res->start = 0; |
res->end = sz; |
194,7 → 186,7 |
if (sz && sz != 0xffffffff) |
{ |
sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK); |
sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); |
if (sz) |
{ |
210,7 → 202,7 |
static void pci_read_irq(struct pci_dev *dev) |
{ |
u8_t irq; |
u8 irq; |
irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN); |
dev->pin = irq; |
222,7 → 214,7 |
int pci_setup_device(struct pci_dev *dev) |
{ |
u32_t class; |
u32 class; |
class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION); |
dev->revision = class & 0xff; |
254,7 → 246,7 |
*/ |
if (class == PCI_CLASS_STORAGE_IDE) |
{ |
u8_t progif; |
u8 progif; |
progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG); |
if ((progif & 1) == 0) |
319,12 → 311,12 |
return 0; |
}; |
static pci_dev_t* pci_scan_device(u32_t busnr, int devfn) |
static pci_dev_t* pci_scan_device(u32 busnr, int devfn) |
{ |
pci_dev_t *dev; |
u32_t id; |
u8_t hdr; |
u32 id; |
u8 hdr; |
int timeout = 10; |
380,7 → 372,7 |
int pci_scan_slot(u32_t bus, int devfn) |
int pci_scan_slot(u32 bus, int devfn) |
{ |
int func, nr = 0; |
488,8 → 480,8 |
int enum_pci_devices() |
{ |
pci_dev_t *dev; |
u32_t last_bus; |
u32_t bus = 0 , devfn = 0; |
u32 last_bus; |
u32 bus = 0 , devfn = 0; |
last_bus = PciApi(1); |
672,11 → 664,6 |
} |
struct pci_bus_region { |
resource_size_t start; |
resource_size_t end; |
}; |
static inline void |
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
struct resource *res) |
775,21 → 762,11 |
loff_t start; |
void __iomem *rom; |
// ENTER(); |
// dbgprintf("resource start %x end %x flags %x\n", |
// res->start, res->end, res->flags); |
/* |
* IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
* memory map if the VGA enable bit of the Bridge Control register is |
* set for embedded VGA. |
*/ |
start = (loff_t)0xC0000; |
*size = 0x20000; /* cover C000:0 through E000:0 */ |
#if 0 |
if (res->flags & IORESOURCE_ROM_SHADOW) { |
/* primary video rom always starts here */ |
start = (loff_t)0xC0000; |
801,21 → 778,11 |
return (void __iomem *)(unsigned long) |
pci_resource_start(pdev, PCI_ROM_RESOURCE); |
} else { |
/* assign the ROM an address if it doesn't have one */ |
// if (res->parent == NULL && |
// pci_assign_resource(pdev,PCI_ROM_RESOURCE)) |
return NULL; |
// start = pci_resource_start(pdev, PCI_ROM_RESOURCE); |
// *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); |
// if (*size == 0) |
// return NULL; |
start = (loff_t)0xC0000; |
*size = 0x20000; /* cover C000:0 through E000:0 */ |
/* Enable ROM space decodes */ |
// if (pci_enable_rom(pdev)) |
// return NULL; |
} |
} |
#endif |
rom = ioremap(start, *size); |
if (!rom) { |
833,7 → 800,6 |
* True size is important if the ROM is going to be copied. |
*/ |
*size = pci_get_rom_size(pdev, rom, *size); |
// LEAVE(); |
return rom; |
} |
861,6 → 827,8 |
else |
cmd = old_cmd & ~PCI_COMMAND_MASTER; |
if (cmd != old_cmd) { |
dbgprintf("%s bus mastering\n", |
enable ? "enabling" : "disabling"); |
pci_write_config_word(dev, PCI_COMMAND, cmd); |
} |
dev->is_busmaster = enable; |
/drivers/video/drm/radeon/ppsmc.h |
---|
56,6 → 56,14 |
#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 |
#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 |
#define FDO_MODE_HARDWARE 0 |
#define FDO_MODE_PIECE_WISE_LINEAR 1 |
enum FAN_CONTROL { |
FAN_CONTROL_FUZZY, |
FAN_CONTROL_TABLE |
}; |
#define PPSMC_Result_OK ((uint8_t)0x01) |
#define PPSMC_Result_Failed ((uint8_t)0xFF) |
79,6 → 87,8 |
#define PPSMC_MSG_DisableCac ((uint8_t)0x54) |
#define PPSMC_TDPClampingActive ((uint8_t)0x59) |
#define PPSMC_TDPClampingInactive ((uint8_t)0x5A) |
#define PPSMC_StartFanControl ((uint8_t)0x5B) |
#define PPSMC_StopFanControl ((uint8_t)0x5C) |
#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) |
#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) |
#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) |
106,6 → 116,7 |
#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) |
#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) |
#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) |
#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) |
#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) |
#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) |
#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) |
149,7 → 160,11 |
#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) |
#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) |
#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) |
#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) |
#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) |
#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) |
#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) |
#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) |
157,10 → 172,11 |
#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102) |
#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104) |
#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108) |
#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112) |
#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a) |
#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109) |
#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e) |
#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f) |
#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112) |
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) |
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) |
#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120) |
/drivers/video/drm/radeon/pptable.h |
---|
96,6 → 96,14 |
USHORT usTMax; // The max temperature |
} ATOM_PPLIB_FANTABLE2; |
typedef struct _ATOM_PPLIB_FANTABLE3 |
{ |
ATOM_PPLIB_FANTABLE2 basicTable2; |
UCHAR ucFanControlMode; |
USHORT usFanPWMMax; |
USHORT usFanOutputSensitivity; |
} ATOM_PPLIB_FANTABLE3; |
typedef struct _ATOM_PPLIB_EXTENDEDHEADER |
{ |
USHORT usSize; |
/drivers/video/drm/radeon/r100.c |
---|
869,13 → 869,14 |
return false; |
} |
int r100_copy_blit(struct radeon_device *rdev, |
struct radeon_fence *r100_copy_blit(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
struct radeon_fence *fence; |
uint32_t cur_pages; |
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; |
uint32_t pitch; |
896,7 → 897,7 |
r = radeon_ring_lock(rdev, ring, ndw); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); |
return -EINVAL; |
return ERR_PTR(-EINVAL); |
} |
while (num_gpu_pages > 0) { |
cur_pages = num_gpu_pages; |
936,11 → 937,13 |
RADEON_WAIT_2D_IDLECLEAN | |
RADEON_WAIT_HOST_IDLECLEAN | |
RADEON_WAIT_DMA_GUI_IDLE); |
if (fence) { |
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); |
r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
return ERR_PTR(r); |
} |
radeon_ring_unlock_commit(rdev, ring, false); |
return r; |
return fence; |
} |
static int r100_cp_wait_for_idle(struct radeon_device *rdev) |
1247,7 → 1250,7 |
int r; |
u32 tile_flags = 0; |
u32 tmp; |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
u32 value; |
r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
1286,7 → 1289,7 |
int idx) |
{ |
unsigned c, i; |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
struct r100_cs_track *track; |
int r = 0; |
volatile uint32_t *ib; |
1535,7 → 1538,7 |
struct radeon_cs_packet *pkt, |
unsigned idx, unsigned reg) |
{ |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
struct r100_cs_track *track; |
volatile uint32_t *ib; |
uint32_t tmp; |
1894,7 → 1897,7 |
static int r100_packet3_check(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt) |
{ |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
struct r100_cs_track *track; |
unsigned idx; |
volatile uint32_t *ib; |
2054,7 → 2057,7 |
} |
if (r) |
return r; |
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
} while (p->idx < p->chunk_ib->length_dw); |
return 0; |
} |
3200,6 → 3203,9 |
uint32_t pixel_bytes1 = 0; |
uint32_t pixel_bytes2 = 0; |
if (!rdev->mode_info.mode_config_initialized) |
return; |
radeon_update_display_priority(rdev); |
if (rdev->mode_info.crtcs[0]->base.enabled) { |
/drivers/video/drm/radeon/r200.c |
---|
80,13 → 80,14 |
return vtx_size; |
} |
int r200_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *r200_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
struct radeon_fence *fence; |
uint32_t size; |
uint32_t cur_size; |
int i, num_loops; |
98,7 → 99,7 |
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
return ERR_PTR(r); |
} |
/* Must wait for 2D idle & clean before DMA or hangs might happen */ |
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
118,11 → 119,13 |
} |
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE); |
if (fence) { |
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); |
r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
return ERR_PTR(r); |
} |
radeon_ring_unlock_commit(rdev, ring, false); |
return r; |
return fence; |
} |
143,7 → 146,7 |
struct radeon_cs_packet *pkt, |
unsigned idx, unsigned reg) |
{ |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
struct r100_cs_track *track; |
volatile uint32_t *ib; |
uint32_t tmp; |
/drivers/video/drm/radeon/r300.c |
---|
598,7 → 598,7 |
struct radeon_cs_packet *pkt, |
unsigned idx, unsigned reg) |
{ |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
struct r100_cs_track *track; |
volatile uint32_t *ib; |
uint32_t tmp, tile_flags = 0; |
1142,7 → 1142,7 |
static int r300_packet3_check(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt) |
{ |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
struct r100_cs_track *track; |
volatile uint32_t *ib; |
unsigned idx; |
1283,7 → 1283,7 |
if (r) { |
return r; |
} |
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
} while (p->idx < p->chunk_ib->length_dw); |
return 0; |
} |
/drivers/video/drm/radeon/r600.c |
---|
122,9 → 122,97 |
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) |
{ |
unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0; |
int r; |
/* bypass vclk and dclk with bclk */ |
WREG32_P(CG_UPLL_FUNC_CNTL_2, |
VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1), |
~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK)); |
/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~( |
UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK)); |
if (rdev->family >= CHIP_RS780) |
WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL, |
~UPLL_BYPASS_CNTL); |
if (!vclk || !dclk) { |
/* keep the Bypass mode, put PLL to sleep */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); |
return 0; |
} |
if (rdev->clock.spll.reference_freq == 10000) |
ref_div = 34; |
else |
ref_div = 4; |
r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000, |
ref_div + 1, 0xFFF, 2, 30, ~0, |
&fb_div, &vclk_div, &dclk_div); |
if (r) |
return r; |
if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780) |
fb_div >>= 1; |
else |
fb_div |= 1; |
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
if (r) |
return r; |
/* assert PLL_RESET */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK); |
/* For RS780 we have to choose ref clk */ |
if (rdev->family >= CHIP_RS780) |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK, |
~UPLL_REFCLK_SRC_SEL_MASK); |
/* set the required fb, ref and post divider values */ |
WREG32_P(CG_UPLL_FUNC_CNTL, |
UPLL_FB_DIV(fb_div) | |
UPLL_REF_DIV(ref_div), |
~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK)); |
WREG32_P(CG_UPLL_FUNC_CNTL_2, |
UPLL_SW_HILEN(vclk_div >> 1) | |
UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) | |
UPLL_SW_HILEN2(dclk_div >> 1) | |
UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) | |
UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK, |
~UPLL_SW_MASK); |
/* give the PLL some time to settle */ |
mdelay(15); |
/* deassert PLL_RESET */ |
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK); |
mdelay(15); |
/* deassert BYPASS EN */ |
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); |
if (rdev->family >= CHIP_RS780) |
WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL); |
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
if (r) |
return r; |
/* switch VCLK and DCLK selection */ |
WREG32_P(CG_UPLL_FUNC_CNTL_2, |
VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2), |
~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK)); |
mdelay(100); |
return 0; |
} |
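The sequence above follows the usual UPLL relation that radeon_uvd_calc_upll_dividers solves within the VCO bounds passed to it: the VCO runs at ref_freq * fb_div / ref_div, and VCLK/DCLK are the VCO divided by their post dividers (ignoring the fractional-feedback quirk handled by the fb_div >>= 1 / fb_div |= 1 branch). A rough worked example with assumed numbers, in the 10 kHz units this code appears to use:

    /* Sketch of the divider relation only; not driver code.
     * vco  = ref_freq * fb_div / ref_div
     * vclk = vco / vclk_div,  dclk = vco / dclk_div
     * e.g. ref 2700 (27 MHz), fb_div 100, ref_div 4 -> vco 67500 (675 MHz);
     *      vclk_div 12 -> vclk 5625 (56.25 MHz). */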
void dce3_program_fmt(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
992,6 → 1080,8 |
WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); |
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); |
1042,6 → 1132,8 |
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp); |
WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp); |
radeon_gart_table_vram_unpin(rdev); |
} |
1338,7 → 1430,7 |
if (rdev->vram_scratch.robj == NULL) { |
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, |
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
0, NULL, &rdev->vram_scratch.robj); |
0, NULL, NULL, &rdev->vram_scratch.robj); |
if (r) { |
return r; |
} |
2792,12 → 2884,13 |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int r600_copy_cpdma(struct radeon_device *rdev, |
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_fence *fence; |
struct radeon_sync sync; |
int ring_index = rdev->asic->copy.blit_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_bytes, cur_size_in_bytes, tmp; |
2804,11 → 2897,7 |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
radeon_sync_create(&sync); |
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); |
2815,12 → 2904,12 |
r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
radeon_sync_resv(rdev, &sync, resv, false); |
radeon_sync_rings(rdev, &sync, ring->idx); |
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
2846,17 → 2935,17 |
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); |
r = radeon_fence_emit(rdev, fence, ring->idx); |
r = radeon_fence_emit(rdev, &fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
radeon_sync_free(rdev, &sync, fence); |
return r; |
return fence; |
} |
int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
3171,7 → 3260,7 |
r = radeon_bo_create(rdev, rdev->ih.ring_size, |
PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_GTT, 0, |
NULL, &rdev->ih.ring_obj); |
NULL, NULL, &rdev->ih.ring_obj); |
if (r) { |
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); |
return r; |
/drivers/video/drm/radeon/r600_cs.c |
---|
969,7 → 969,7 |
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) |
{ |
struct r600_cs_track *track = (struct r600_cs_track *)p->track; |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
u32 m, i, tmp, *ib; |
int r; |
1626,7 → 1626,7 |
static int r600_packet3_check(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt) |
{ |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
struct r600_cs_track *track; |
volatile u32 *ib; |
unsigned idx; |
2316,7 → 2316,7 |
p->track = NULL; |
return r; |
} |
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
} while (p->idx < p->chunk_ib->length_dw); |
#if 0 |
for (r = 0; r < p->ib.length_dw; r++) { |
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); |
2351,10 → 2351,10 |
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) |
{ |
if (p->chunk_relocs_idx == -1) { |
if (p->chunk_relocs == NULL) { |
return 0; |
} |
p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL); |
p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL); |
if (p->relocs == NULL) { |
return -ENOMEM; |
} |
2398,7 → 2398,7 |
/* Copy the packet into the IB, the parser will read from the |
* input memory (cached) and write to the IB (which can be |
* uncached). */ |
ib_chunk = &parser.chunks[parser.chunk_ib_idx]; |
ib_chunk = parser.chunk_ib; |
parser.ib.length_dw = ib_chunk->length_dw; |
*l = parser.ib.length_dw; |
if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) { |
2435,17 → 2435,17 |
* GPU offset using the provided start. |
**/ |
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, |
struct radeon_cs_reloc **cs_reloc) |
struct radeon_bo_list **cs_reloc) |
{ |
struct radeon_cs_chunk *relocs_chunk; |
unsigned idx; |
*cs_reloc = NULL; |
if (p->chunk_relocs_idx == -1) { |
if (p->chunk_relocs == NULL) { |
DRM_ERROR("No relocation chunk !\n"); |
return -EINVAL; |
} |
relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
relocs_chunk = p->chunk_relocs; |
idx = p->dma_reloc_idx; |
if (idx >= p->nrelocs) { |
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", |
2452,7 → 2452,7 |
idx, p->nrelocs); |
return -EINVAL; |
} |
*cs_reloc = p->relocs_ptr[idx]; |
*cs_reloc = &p->relocs[idx]; |
p->dma_reloc_idx++; |
return 0; |
} |
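With the relocs kept as a flat radeon_bo_list array, a DMA packet handler |
consumes them roughly like this (a sketch; the function name and the idx |
bookkeeping are illustrative only): |
static int dma_patch_write_dst(struct radeon_cs_parser *p, u32 idx) |
{ |
    struct radeon_bo_list *reloc; |
    volatile u32 *ib = p->ib.ptr; |
    int r; |
    r = r600_dma_cs_next_reloc(p, &reloc); |
    if (r) |
        return r; |
    /* fold the BO's GPU offset into the destination address dword */ |
    ib[idx + 1] += (u32)(reloc->gpu_offset & 0xfffffffc); |
    return 0; |
} |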
2472,8 → 2472,8 |
**/ |
int r600_dma_cs_parse(struct radeon_cs_parser *p) |
{ |
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; |
struct radeon_cs_reloc *src_reloc, *dst_reloc; |
struct radeon_cs_chunk *ib_chunk = p->chunk_ib; |
struct radeon_bo_list *src_reloc, *dst_reloc; |
u32 header, cmd, count, tiled; |
volatile u32 *ib = p->ib.ptr; |
u32 idx, idx_value; |
2619,7 → 2619,7 |
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); |
return -EINVAL; |
} |
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
} while (p->idx < p->chunk_ib->length_dw); |
#if 0 |
for (r = 0; r < p->ib.length_dw; r++) { |
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); |
/drivers/video/drm/radeon/r600_dma.c |
---|
338,17 → 338,17 |
{ |
struct radeon_ib ib; |
unsigned i; |
unsigned index; |
int r; |
void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
u32 tmp = 0; |
u64 gpu_addr; |
if (!ptr) { |
DRM_ERROR("invalid vram scratch pointer\n"); |
return -EINVAL; |
} |
if (ring->idx == R600_RING_TYPE_DMA_INDEX) |
index = R600_WB_DMA_RING_TEST_OFFSET; |
else |
index = CAYMAN_WB_DMA1_RING_TEST_OFFSET; |
tmp = 0xCAFEDEAD; |
writel(tmp, ptr); |
gpu_addr = rdev->wb.gpu_addr + index; |
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); |
if (r) { |
357,8 → 357,8 |
} |
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); |
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; |
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; |
ib.ptr[1] = lower_32_bits(gpu_addr); |
ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff; |
ib.ptr[3] = 0xDEADBEEF; |
ib.length_dw = 4; |
374,7 → 374,7 |
return r; |
} |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = readl(ptr); |
tmp = le32_to_cpu(rdev->wb.wb[index/4]); |
if (tmp == 0xDEADBEEF) |
break; |
DRM_UDELAY(1); |
430,18 → 430,19 |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* @resv: reservation object to sync to |
* |
* Copy GPU paging using the DMA engine (r6xx). |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int r600_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_fence *fence; |
struct radeon_sync sync; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_dw, cur_size_in_dw; |
448,11 → 449,7 |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
radeon_sync_create(&sync); |
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; |
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); |
459,12 → 456,12 |
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
radeon_sync_resv(rdev, &sync, resv, false); |
radeon_sync_rings(rdev, &sync, ring->idx); |
for (i = 0; i < num_loops; i++) { |
cur_size_in_dw = size_in_dw; |
480,15 → 477,15 |
dst_offset += cur_size_in_dw * 4; |
} |
r = radeon_fence_emit(rdev, fence, ring->idx); |
r = radeon_fence_emit(rdev, &fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
radeon_sync_free(rdev, &sync, fence); |
return r; |
return fence; |
} |
/drivers/video/drm/radeon/r600_dpm.c |
---|
24,6 → 24,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "r600d.h" |
#include "r600_dpm.h" |
#include "atom.h" |
810,6 → 811,7 |
union fan_info { |
struct _ATOM_PPLIB_FANTABLE fan; |
struct _ATOM_PPLIB_FANTABLE2 fan2; |
struct _ATOM_PPLIB_FANTABLE3 fan3; |
}; |
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table, |
899,6 → 901,14 |
else |
rdev->pm.dpm.fan.t_max = 10900; |
rdev->pm.dpm.fan.cycle_delay = 100000; |
if (fan_info->fan.ucFanTableFormat >= 3) { |
rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; |
rdev->pm.dpm.fan.default_max_fan_pwm = |
le16_to_cpu(fan_info->fan3.usFanPWMMax); |
rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836; |
rdev->pm.dpm.fan.fan_output_sensitivity = |
le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); |
} |
rdev->pm.dpm.fan.ucode_fan_control = true; |
} |
} |
1255,7 → 1265,7 |
(mode_info->atom_context->bios + data_offset + |
le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); |
rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = |
ppt->usMaximumPowerDeliveryLimit; |
le16_to_cpu(ppt->usMaximumPowerDeliveryLimit); |
pt = &ppt->power_tune_table; |
} else { |
ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) |
/drivers/video/drm/radeon/r600_dpm.h |
---|
96,6 → 96,9 |
#define R600_TEMP_RANGE_MIN (90 * 1000) |
#define R600_TEMP_RANGE_MAX (120 * 1000) |
#define FDO_PWM_MODE_STATIC 1 |
#define FDO_PWM_MODE_STATIC_RPM 5 |
enum r600_power_level { |
R600_POWER_LEVEL_LOW = 0, |
R600_POWER_LEVEL_MEDIUM = 1, |
/drivers/video/drm/radeon/r600_hdmi.c |
---|
71,6 → 71,169 |
/* |
* check if the chipset is supported |
*/ |
static int r600_audio_chipset_supported(struct radeon_device *rdev) |
{ |
return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev); |
} |
static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev) |
{ |
struct r600_audio_pin status; |
uint32_t value; |
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL); |
/* number of channels */ |
status.channels = (value & 0x7) + 1; |
/* bits per sample */ |
switch ((value & 0xF0) >> 4) { |
case 0x0: |
status.bits_per_sample = 8; |
break; |
case 0x1: |
status.bits_per_sample = 16; |
break; |
case 0x2: |
status.bits_per_sample = 20; |
break; |
case 0x3: |
status.bits_per_sample = 24; |
break; |
case 0x4: |
status.bits_per_sample = 32; |
break; |
default: |
dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n", |
(int)value); |
status.bits_per_sample = 16; |
} |
/* current sampling rate in Hz */ |
if (value & 0x4000) |
status.rate = 44100; |
else |
status.rate = 48000; |
status.rate *= ((value >> 11) & 0x7) + 1; |
status.rate /= ((value >> 8) & 0x7) + 1; |
value = RREG32(R600_AUDIO_STATUS_BITS); |
/* iec 60958 status bits */ |
status.status_bits = value & 0xff; |
/* iec 60958 category code */ |
status.category_code = (value >> 8) & 0xff; |
return status; |
} |
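The register packs base rate, multiplier and divisor together; a worked |
example of the decode above: |
/* value = 0x0800: |
 *   bit 14 clear             -> base rate 48000 |
 *   ((value >> 11) & 7) == 1 -> rate *= 1 + 1  (96000) |
 *   ((value >> 8)  & 7) == 0 -> rate /= 0 + 1  (still 96000) |
 * => status.rate = 96000 Hz |
 */ |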
/* |
* update all hdmi interfaces with current audio parameters |
*/ |
void r600_audio_update_hdmi(struct work_struct *work) |
{ |
struct radeon_device *rdev = container_of(work, struct radeon_device, |
audio_work); |
struct drm_device *dev = rdev->ddev; |
struct r600_audio_pin audio_status = r600_audio_status(rdev); |
struct drm_encoder *encoder; |
bool changed = false; |
if (rdev->audio.pin[0].channels != audio_status.channels || |
rdev->audio.pin[0].rate != audio_status.rate || |
rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample || |
rdev->audio.pin[0].status_bits != audio_status.status_bits || |
rdev->audio.pin[0].category_code != audio_status.category_code) { |
rdev->audio.pin[0] = audio_status; |
changed = true; |
} |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
if (!radeon_encoder_is_digital(encoder)) |
continue; |
if (changed || r600_hdmi_buffer_status_changed(encoder)) |
r600_hdmi_update_audio_settings(encoder); |
} |
} |
/* enable the audio stream */ |
void r600_audio_enable(struct radeon_device *rdev, |
struct r600_audio_pin *pin, |
u8 enable_mask) |
{ |
u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL); |
if (!pin) |
return; |
if (enable_mask) { |
tmp |= AUDIO_ENABLED; |
if (enable_mask & 1) |
tmp |= PIN0_AUDIO_ENABLED; |
if (enable_mask & 2) |
tmp |= PIN1_AUDIO_ENABLED; |
if (enable_mask & 4) |
tmp |= PIN2_AUDIO_ENABLED; |
if (enable_mask & 8) |
tmp |= PIN3_AUDIO_ENABLED; |
} else { |
tmp &= ~(AUDIO_ENABLED | |
PIN0_AUDIO_ENABLED | |
PIN1_AUDIO_ENABLED | |
PIN2_AUDIO_ENABLED | |
PIN3_AUDIO_ENABLED); |
} |
WREG32(AZ_HOT_PLUG_CONTROL, tmp); |
} |
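enable_mask carries one bit per pin (bit 0 = PIN0 ... bit 3 = PIN3) and zero |
tears everything down; the HDMI paths later in this diff use exactly these |
two calls: |
r600_audio_enable(rdev, dig->afmt->pin, 0xf); /* all four pins on */ |
r600_audio_enable(rdev, dig->afmt->pin, 0);   /* all pins off     */ |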
/* |
* initialize the audio vars |
*/ |
int r600_audio_init(struct radeon_device *rdev) |
{ |
if (!radeon_audio || !r600_audio_chipset_supported(rdev)) |
return 0; |
rdev->audio.enabled = true; |
rdev->audio.num_pins = 1; |
rdev->audio.pin[0].channels = -1; |
rdev->audio.pin[0].rate = -1; |
rdev->audio.pin[0].bits_per_sample = -1; |
rdev->audio.pin[0].status_bits = 0; |
rdev->audio.pin[0].category_code = 0; |
rdev->audio.pin[0].id = 0; |
/* disable audio. it will be set up later */ |
r600_audio_enable(rdev, &rdev->audio.pin[0], 0); |
return 0; |
} |
/* |
* release the audio timer |
* TODO: How to do this correctly on SMP systems? |
*/ |
void r600_audio_fini(struct radeon_device *rdev) |
{ |
if (!rdev->audio.enabled) |
return; |
r600_audio_enable(rdev, &rdev->audio.pin[0], 0); |
rdev->audio.enabled = false; |
} |
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev) |
{ |
/* only one pin on 6xx-NI */ |
return &rdev->audio.pin[0]; |
} |
/* |
* calculate CTS and N values if they are not found in the table |
*/ |
static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq) |
356,7 → 519,7 |
/* disable audio prior to setting up hw */ |
dig->afmt->pin = r600_audio_get_pin(rdev); |
r600_audio_enable(rdev, dig->afmt->pin, false); |
r600_audio_enable(rdev, dig->afmt->pin, 0xf); |
r600_audio_set_dto(encoder, mode->clock); |
442,7 → 605,7 |
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); |
/* enable audio after setting up hw */ |
r600_audio_enable(rdev, dig->afmt->pin, true); |
r600_audio_enable(rdev, dig->afmt->pin, 0xf); |
} |
/** |
527,6 → 690,11 |
if (!enable && !dig->afmt->enabled) |
return; |
if (!enable && dig->afmt->pin) { |
r600_audio_enable(rdev, dig->afmt->pin, 0); |
dig->afmt->pin = NULL; |
} |
/* Older chipsets require setting HDMI and routing manually */ |
if (!ASIC_IS_DCE3(rdev)) { |
if (enable) |
/drivers/video/drm/radeon/r600d.h |
---|
323,11 → 323,12 |
#define HDP_TILING_CONFIG 0x2F3C |
#define HDP_DEBUG1 0x2F34 |
#define MC_CONFIG 0x2000 |
#define MC_VM_AGP_TOP 0x2184 |
#define MC_VM_AGP_BOT 0x2188 |
#define MC_VM_AGP_BASE 0x218C |
#define MC_VM_FB_LOCATION 0x2180 |
#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C |
#define MC_VM_L1_TLB_MCB_RD_UVD_CNTL 0x2124 |
#define ENABLE_L1_TLB (1 << 0) |
#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1) |
#define ENABLE_L1_STRICT_ORDERING (1 << 2) |
347,6 → 348,7 |
#define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15) |
#define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000 |
#define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15 |
#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C |
#define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0 |
#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC |
#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204 |
353,6 → 355,7 |
#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208 |
#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C |
#define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200 |
#define MC_VM_L1_TLB_MCB_WR_UVD_CNTL 0x212c |
#define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4 |
#define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8 |
#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210 |
366,6 → 369,8 |
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194 |
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198 |
#define RS_DQ_RD_RET_CONF 0x2348 |
#define PA_CL_ENHANCE 0x8A14 |
#define CLIP_VTX_REORDER_ENA (1 << 0) |
#define NUM_CLIP_SEQ(x) ((x) << 1) |
922,6 → 927,23 |
# define TARGET_LINK_SPEED_MASK (0xf << 0) |
# define SELECTABLE_DEEMPHASIS (1 << 6) |
/* Audio */ |
#define AZ_HOT_PLUG_CONTROL 0x7300 |
# define AZ_FORCE_CODEC_WAKE (1 << 0) |
# define JACK_DETECTION_ENABLE (1 << 4) |
# define UNSOLICITED_RESPONSE_ENABLE (1 << 8) |
# define CODEC_HOT_PLUG_ENABLE (1 << 12) |
# define AUDIO_ENABLED (1 << 31) |
/* DCE3 adds */ |
# define PIN0_JACK_DETECTION_ENABLE (1 << 4) |
# define PIN1_JACK_DETECTION_ENABLE (1 << 5) |
# define PIN2_JACK_DETECTION_ENABLE (1 << 6) |
# define PIN3_JACK_DETECTION_ENABLE (1 << 7) |
# define PIN0_AUDIO_ENABLED (1 << 24) |
# define PIN1_AUDIO_ENABLED (1 << 25) |
# define PIN2_AUDIO_ENABLED (1 << 26) |
# define PIN3_AUDIO_ENABLED (1 << 27) |
/* Audio clocks DCE 2.0/3.0 */ |
#define AUDIO_DTO 0x7340 |
# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0) |
1476,6 → 1498,7 |
#define UVD_CGC_GATE 0xf4a8 |
#define UVD_LMI_CTRL2 0xf4f4 |
#define UVD_MASTINT_EN 0xf500 |
#define UVD_FW_START 0xf51C |
#define UVD_LMI_ADDR_EXT 0xf594 |
#define UVD_LMI_CTRL 0xf598 |
#define UVD_LMI_SWAP_CNTL 0xf5b4 |
1488,6 → 1511,13 |
#define UVD_MPC_SET_MUX 0xf5f4 |
#define UVD_MPC_SET_ALU 0xf5f8 |
#define UVD_VCPU_CACHE_OFFSET0 0xf608 |
#define UVD_VCPU_CACHE_SIZE0 0xf60c |
#define UVD_VCPU_CACHE_OFFSET1 0xf610 |
#define UVD_VCPU_CACHE_SIZE1 0xf614 |
#define UVD_VCPU_CACHE_OFFSET2 0xf618 |
#define UVD_VCPU_CACHE_SIZE2 0xf61c |
#define UVD_VCPU_CNTL 0xf660 |
#define UVD_SOFT_RESET 0xf680 |
#define RBC_SOFT_RESET (1<<0) |
1517,9 → 1547,35 |
#define UVD_CONTEXT_ID 0xf6f4 |
/* rs780 only */ |
#define GFX_MACRO_BYPASS_CNTL 0x30c0 |
#define SPLL_BYPASS_CNTL (1 << 0) |
#define UPLL_BYPASS_CNTL (1 << 1) |
#define CG_UPLL_FUNC_CNTL 0x7e0 |
# define UPLL_RESET_MASK 0x00000001 |
# define UPLL_SLEEP_MASK 0x00000002 |
# define UPLL_BYPASS_EN_MASK 0x00000004 |
# define UPLL_CTLREQ_MASK 0x00000008 |
# define UPLL_FB_DIV(x) ((x) << 4) |
# define UPLL_FB_DIV_MASK 0x0000FFF0 |
# define UPLL_REF_DIV(x) ((x) << 16) |
# define UPLL_REF_DIV_MASK 0x003F0000 |
# define UPLL_REFCLK_SRC_SEL_MASK 0x20000000 |
# define UPLL_CTLACK_MASK 0x40000000 |
# define UPLL_CTLACK2_MASK 0x80000000 |
#define CG_UPLL_FUNC_CNTL_2 0x7e4 |
# define UPLL_SW_HILEN(x) ((x) << 0) |
# define UPLL_SW_LOLEN(x) ((x) << 4) |
# define UPLL_SW_HILEN2(x) ((x) << 8) |
# define UPLL_SW_LOLEN2(x) ((x) << 12) |
# define UPLL_DIVEN_MASK 0x00010000 |
# define UPLL_DIVEN2_MASK 0x00020000 |
# define UPLL_SW_MASK 0x0003FFFF |
# define VCLK_SRC_SEL(x) ((x) << 20) |
# define VCLK_SRC_SEL_MASK 0x01F00000 |
# define DCLK_SRC_SEL(x) ((x) << 25) |
# define DCLK_SRC_SEL_MASK 0x3E000000 |
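The field macros compose into a single read-modify-write; a sketch of |
programming the software divider lengths with radeon's WREG32_P helper (the |
divider value 5 is a placeholder, not taken from this diff): |
WREG32_P(CG_UPLL_FUNC_CNTL_2, |
         UPLL_SW_HILEN(5) | UPLL_SW_LOLEN(5) | |
         UPLL_SW_HILEN2(5) | UPLL_SW_LOLEN2(5), |
         ~UPLL_SW_MASK); /* preserve all bits outside UPLL_SW_MASK */ |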
/* |
* PM4 |
/drivers/video/drm/radeon/radeon.h |
---|
60,12 → 60,13 |
* are considered as fatal) |
*/ |
#include <asm/atomic.h> |
#include <linux/atomic.h> |
#include <linux/wait.h> |
#include <linux/list.h> |
#include <linux/kref.h> |
#include <linux/interval_tree.h> |
#include <asm/div64.h> |
#include <linux/fence.h> |
#include <ttm/ttm_bo_api.h> |
#include <ttm/ttm_bo_driver.h> |
73,11 → 74,11 |
//#include <ttm/ttm_module.h> |
#include <ttm/ttm_execbuf_util.h> |
#include <drm/drm_gem.h> |
#include <linux/irqreturn.h> |
#include <pci.h> |
#include <linux/pci.h> |
#include <errno-base.h> |
#include "radeon_family.h" |
#include "radeon_mode.h" |
#include "radeon_reg.h" |
154,9 → 155,6 |
#define RADEONFB_CONN_LIMIT 4 |
#define RADEON_BIOS_NUM_SCRATCH 8 |
/* fence seq are set to this number when signaled */ |
#define RADEON_FENCE_SIGNALED_SEQ 0LL |
/* internal ring indices */ |
/* r1xx+ has gfx CP ring */ |
#define RADEON_RING_TYPE_GFX_INDEX 0 |
183,9 → 181,6 |
/* number of hw syncs before falling back on blocking */ |
#define RADEON_NUM_SYNCS 4 |
/* number of hw syncs before falling back on blocking */ |
#define RADEON_NUM_SYNCS 4 |
/* hardcode those limit for now */ |
#define RADEON_VA_IB_OFFSET (1 << 20) |
#define RADEON_VA_RESERVED_SIZE (8 << 20) |
384,6 → 379,7 |
* Fences. |
*/ |
struct radeon_fence_driver { |
struct radeon_device *rdev; |
uint32_t scratch_reg; |
uint64_t gpu_addr; |
volatile uint32_t *cpu_addr; |
390,22 → 386,26 |
/* sync_seq is protected by ring emission lock */ |
uint64_t sync_seq[RADEON_NUM_RINGS]; |
atomic64_t last_seq; |
bool initialized; |
bool initialized, delayed_irq; |
struct delayed_work lockup_work; |
}; |
struct radeon_fence { |
struct fence base; |
struct radeon_device *rdev; |
struct kref kref; |
/* protected by radeon_fence.lock */ |
uint64_t seq; |
/* RB, DMA, etc. */ |
unsigned ring; |
bool is_vm_update; |
wait_queue_t fence_wake; |
}; |
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); |
int radeon_fence_driver_init(struct radeon_device *rdev); |
void radeon_fence_driver_fini(struct radeon_device *rdev); |
void radeon_fence_driver_force_completion(struct radeon_device *rdev); |
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring); |
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); |
void radeon_fence_process(struct radeon_device *rdev, int ring); |
bool radeon_fence_signaled(struct radeon_fence *fence); |
481,6 → 481,15 |
#endif |
}; |
struct radeon_bo_list { |
struct radeon_bo *robj; |
struct ttm_validate_buffer tv; |
uint64_t gpu_offset; |
unsigned prefered_domains; |
unsigned allowed_domains; |
uint32_t tiling_flags; |
}; |
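radeon_bo_list folds the validation-relevant fields of the old |
radeon_cs_reloc into one entry; filling one for the TTM validation list looks |
roughly like this (a sketch distilled from the CS/VM users later in this |
diff): |
list[i].robj = bo; |
list[i].prefered_domains = RADEON_GEM_DOMAIN_VRAM; |
list[i].allowed_domains = RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT; |
list[i].tv.bo = &bo->tbo; |
list[i].tv.shared = true; /* read-only use: fence added as shared */ |
list_add(&list[i].tv.head, &validated); |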
/* bo virtual address in a specific vm */ |
struct radeon_bo_va { |
/* protected by bo being reserved */ |
487,6 → 496,7 |
struct list_head bo_list; |
uint32_t flags; |
uint64_t addr; |
struct radeon_fence *last_pt_update; |
unsigned ref_count; |
/* protected by vm mutex */ |
503,7 → 513,7 |
struct list_head list; |
/* Protected by tbo.reserved */ |
u32 initial_domain; |
u32 placements[3]; |
struct ttm_place placements[4]; |
struct ttm_placement placement; |
struct ttm_buffer_object tbo; |
struct ttm_bo_kmap_obj kmap; |
522,6 → 532,8 |
struct drm_gem_object gem_base; |
pid_t pid; |
struct radeon_mn *mn; |
}; |
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) |
604,7 → 616,6 |
struct radeon_sa_bo *sa_bo; |
signed waiters; |
uint64_t gpu_addr; |
struct radeon_fence *sync_to[RADEON_NUM_RINGS]; |
}; |
int radeon_semaphore_create(struct radeon_device *rdev, |
613,16 → 624,33 |
struct radeon_semaphore *semaphore); |
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, |
struct radeon_semaphore *semaphore); |
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, |
struct radeon_fence *fence); |
int radeon_semaphore_sync_rings(struct radeon_device *rdev, |
struct radeon_semaphore *semaphore, |
int waiting_ring); |
void radeon_semaphore_free(struct radeon_device *rdev, |
struct radeon_semaphore **semaphore, |
struct radeon_fence *fence); |
/* |
* Synchronization |
*/ |
struct radeon_sync { |
struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS]; |
struct radeon_fence *sync_to[RADEON_NUM_RINGS]; |
struct radeon_fence *last_vm_update; |
}; |
void radeon_sync_create(struct radeon_sync *sync); |
void radeon_sync_fence(struct radeon_sync *sync, |
struct radeon_fence *fence); |
int radeon_sync_resv(struct radeon_device *rdev, |
struct radeon_sync *sync, |
struct reservation_object *resv, |
bool shared); |
int radeon_sync_rings(struct radeon_device *rdev, |
struct radeon_sync *sync, |
int waiting_ring); |
void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync, |
struct radeon_fence *fence); |
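Taken together, a radeon_sync object lives through four steps; the copy |
paths earlier in this diff follow exactly this shape (condensed sketch, |
error handling elided): |
struct radeon_sync sync; |
radeon_sync_create(&sync);                  /* 1. init fence/semaphore slots */ |
radeon_sync_resv(rdev, &sync, resv, false); /* 2. gather fences from resv    */ |
radeon_sync_rings(rdev, &sync, ring->idx);  /* 3. emit semaphore waits       */ |
/* ... emit commands, radeon_fence_emit() ... */ |
radeon_sync_free(rdev, &sync, fence);       /* 4. release once fence signals */ |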
/* |
* GART structures, functions & helpers |
*/ |
struct radeon_mc; |
722,6 → 750,10 |
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page); |
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell); |
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev, |
phys_addr_t *aperture_base, |
size_t *aperture_size, |
size_t *start_offset); |
/* |
* IRQS. |
801,6 → 833,7 |
int radeon_irq_kms_init(struct radeon_device *rdev); |
void radeon_irq_kms_fini(struct radeon_device *rdev); |
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring); |
bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring); |
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring); |
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); |
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); |
822,7 → 855,7 |
struct radeon_fence *fence; |
struct radeon_vm *vm; |
bool is_const_ib; |
struct radeon_semaphore *semaphore; |
struct radeon_sync sync; |
}; |
struct radeon_ring { |
899,10 → 932,23 |
uint64_t addr; |
}; |
struct radeon_vm_id { |
unsigned id; |
uint64_t pd_gpu_addr; |
/* last flushed PD/PT update */ |
struct radeon_fence *flushed_updates; |
/* last use of vmid */ |
struct radeon_fence *last_id_use; |
}; |
struct radeon_vm { |
struct mutex mutex; |
struct rb_root va; |
unsigned id; |
/* protecting invalidated and freed */ |
spinlock_t status_lock; |
/* BOs moved, but not yet updated in the PT */ |
struct list_head invalidated; |
911,7 → 957,6 |
/* contains the page directory */ |
struct radeon_bo *page_directory; |
uint64_t pd_gpu_addr; |
unsigned max_pde_used; |
/* array of page tables, one for each page directory entry */ |
919,13 → 964,8 |
struct radeon_bo_va *ib_bo_va; |
struct mutex mutex; |
/* last fence for cs using this vm */ |
struct radeon_fence *fence; |
/* last flush or NULL if we still need to flush */ |
struct radeon_fence *last_flush; |
/* last use of vmid */ |
struct radeon_fence *last_id_use; |
/* for id and flush management per ring */ |
struct radeon_vm_id ids[RADEON_NUM_RINGS]; |
}; |
struct radeon_vm_manager { |
1033,19 → 1073,7 |
/* |
* CS. |
*/ |
struct radeon_cs_reloc { |
struct drm_gem_object *gobj; |
struct radeon_bo *robj; |
struct ttm_validate_buffer tv; |
uint64_t gpu_offset; |
unsigned prefered_domains; |
unsigned allowed_domains; |
uint32_t tiling_flags; |
uint32_t handle; |
}; |
struct radeon_cs_chunk { |
uint32_t chunk_id; |
uint32_t length_dw; |
uint32_t *kdata; |
void __user *user_ptr; |
1063,16 → 1091,15 |
unsigned idx; |
/* relocations */ |
unsigned nrelocs; |
struct radeon_cs_reloc *relocs; |
struct radeon_cs_reloc **relocs_ptr; |
struct radeon_cs_reloc *vm_bos; |
struct radeon_bo_list *relocs; |
struct radeon_bo_list *vm_bos; |
struct list_head validated; |
unsigned dma_reloc_idx; |
/* indices of various chunks */ |
int chunk_ib_idx; |
int chunk_relocs_idx; |
int chunk_flags_idx; |
int chunk_const_ib_idx; |
struct radeon_cs_chunk *chunk_ib; |
struct radeon_cs_chunk *chunk_relocs; |
struct radeon_cs_chunk *chunk_flags; |
struct radeon_cs_chunk *chunk_const_ib; |
struct radeon_ib ib; |
struct radeon_ib const_ib; |
void *track; |
1086,7 → 1113,7 |
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) |
{ |
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; |
struct radeon_cs_chunk *ibc = p->chunk_ib; |
if (ibc->kdata) |
return ibc->kdata[idx]; |
1498,6 → 1525,10 |
u8 t_hyst; |
u32 cycle_delay; |
u16 t_max; |
u8 control_mode; |
u16 default_max_fan_pwm; |
u16 default_fan_output_sensitivity; |
u16 fan_output_sensitivity; |
bool ucode_fan_control; |
}; |
1631,6 → 1662,11 |
/* internal thermal controller on rv6xx+ */ |
enum radeon_int_thermal_type int_thermal_type; |
struct device *int_hwmon_dev; |
/* fan control parameters */ |
bool no_fan; |
u8 fan_pulses_per_revolution; |
u8 fan_min_rpm; |
u8 fan_max_rpm; |
/* dpm */ |
bool dpm_enabled; |
struct radeon_dpm dpm; |
1665,7 → 1701,8 |
uint32_t handle, struct radeon_fence **fence); |
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, |
uint32_t handle, struct radeon_fence **fence); |
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo); |
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo, |
uint32_t allowed_domains); |
void radeon_uvd_free_handles(struct radeon_device *rdev, |
struct drm_file *filp); |
int radeon_uvd_cs_parse(struct radeon_cs_parser *parser); |
1754,6 → 1791,11 |
struct radeon_ring *cpB); |
void radeon_test_syncing(struct radeon_device *rdev); |
/* |
* MMU Notifier |
*/ |
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr); |
void radeon_mn_unregister(struct radeon_bo *bo); |
/* |
* Debugfs |
1787,7 → 1829,8 |
void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring); |
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, |
struct radeon_semaphore *semaphore, bool emit_wait); |
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr); |
/* testing functions */ |
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); |
1868,24 → 1911,24 |
} display; |
/* copy functions for bo handling */ |
struct { |
int (*blit)(struct radeon_device *rdev, |
struct radeon_fence *(*blit)(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
struct reservation_object *resv); |
u32 blit_ring_index; |
int (*dma)(struct radeon_device *rdev, |
struct radeon_fence *(*dma)(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
struct reservation_object *resv); |
u32 dma_ring_index; |
/* method used for bo copy */ |
int (*copy)(struct radeon_device *rdev, |
struct radeon_fence *(*copy)(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
struct reservation_object *resv); |
/* ring used for bo copies */ |
u32 copy_ring_index; |
} copy; |
2291,6 → 2334,7 |
struct radeon_mman mman; |
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; |
wait_queue_head_t fence_queue; |
unsigned fence_context; |
struct mutex ring_lock; |
struct radeon_ring ring[RADEON_NUM_RINGS]; |
bool ib_pool_ready; |
2309,7 → 2353,7 |
bool need_dma32; |
bool accel_working; |
bool fastfb_working; /* IGP feature*/ |
bool needs_reset; |
bool needs_reset, in_reset; |
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; |
const struct firmware *me_fw; /* all family ME firmware */ |
const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
2330,7 → 2374,6 |
struct radeon_mec mec; |
struct work_struct hotplug_work; |
struct work_struct audio_work; |
struct work_struct reset_work; |
int num_crtc; /* number of crtcs */ |
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ |
bool has_uvd; |
2355,6 → 2398,8 |
struct radeon_atcs atcs; |
/* srbm instance registers */ |
struct mutex srbm_mutex; |
/* GRBM index mutex. Protects concurrents access to GRBM index */ |
struct mutex grbm_idx_mutex; |
/* clock, powergating flags */ |
u32 cg_flags; |
u32 pg_flags; |
2366,6 → 2411,7 |
/* tracking pinned memory */ |
u64 vram_pin_size; |
u64 gart_pin_size; |
struct mutex mn_lock; |
}; |
bool radeon_is_px(struct drm_device *dev); |
2421,8 → 2467,18 |
/* |
* Cast helper |
*/ |
#define to_radeon_fence(p) ((struct radeon_fence *)(p)) |
extern const struct fence_ops radeon_fence_ops; |
static inline struct radeon_fence *to_radeon_fence(struct fence *f) |
{ |
struct radeon_fence *__f = container_of(f, struct radeon_fence, base); |
if (__f->base.ops == &radeon_fence_ops) |
return __f; |
return NULL; |
} |
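Since the helper now validates the ops pointer, fences of unknown origin can |
be probed safely (a sketch, assuming a struct fence *f coming from the shared |
fence framework): |
struct radeon_fence *rfence = to_radeon_fence(f); |
if (!rfence) |
    return fence_wait(f, intr); /* foreign fence: use the generic wait */ |
/* radeon-owned: the fast path may look at rfence->ring, rfence->seq */ |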
/* |
* Registers read & write functions. |
*/ |
2741,18 → 2797,25 |
/* |
* RING helpers. |
*/ |
#if DRM_DEBUG_CODE == 0 |
/** |
* radeon_ring_write - write a value to the ring |
* |
* @ring: radeon_ring structure holding ring information |
* @v: dword (dw) value to write |
* |
* Write a value to the requested ring buffer (all asics). |
*/ |
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) |
{ |
if (ring->count_dw <= 0) |
DRM_ERROR("radeon: writing more dwords to the ring than expected!\n"); |
ring->ring[ring->wptr++] = v; |
ring->wptr &= ring->ptr_mask; |
ring->count_dw--; |
ring->ring_free_dw--; |
} |
#else |
/* With debugging this is just too big to inline */ |
void radeon_ring_write(struct radeon_ring *ring, uint32_t v); |
#endif |
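The documented helper sits between a lock and a commit; the canonical |
sequence used throughout this diff (a fragment sketch, padding the reserved |
space with type-2 NOP dwords): |
r = radeon_ring_lock(rdev, ring, ndw); /* reserve ndw dwords */ |
if (r) |
    return r; |
for (i = 0; i < ndw; i++) |
    radeon_ring_write(ring, 0x80000000); /* PACKET2 NOP */ |
radeon_ring_unlock_commit(rdev, ring, false); /* bump wptr, kick HW */ |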
/* |
* ASICs macro. |
2778,7 → 2841,7 |
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib)) |
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib)) |
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp)) |
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm)) |
#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr)) |
#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r)) |
#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r)) |
#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r)) |
2791,9 → 2854,9 |
#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m)) |
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence)) |
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) |
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) |
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) |
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) |
#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv)) |
#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv)) |
#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv)) |
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index |
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index |
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index |
2867,6 → 2930,10 |
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); |
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); |
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); |
extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, |
uint32_t flags); |
extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm); |
extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm); |
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); |
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); |
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); |
2883,7 → 2950,7 |
void radeon_vm_manager_fini(struct radeon_device *rdev); |
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); |
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); |
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, |
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, |
struct radeon_vm *vm, |
struct list_head *head); |
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, |
2890,7 → 2957,7 |
struct radeon_vm *vm, int ring); |
void radeon_vm_flush(struct radeon_device *rdev, |
struct radeon_vm *vm, |
int ring); |
int ring, struct radeon_fence *fence); |
void radeon_vm_fence(struct radeon_device *rdev, |
struct radeon_vm *vm, |
struct radeon_fence *fence); |
2924,10 → 2991,10 |
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev); |
void r600_audio_enable(struct radeon_device *rdev, |
struct r600_audio_pin *pin, |
bool enable); |
u8 enable_mask); |
void dce6_audio_enable(struct radeon_device *rdev, |
struct r600_audio_pin *pin, |
bool enable); |
u8 enable_mask); |
/* |
* R600 vram scratch functions |
2997,7 → 3064,7 |
void radeon_cs_dump_packet(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt); |
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, |
struct radeon_cs_reloc **cs_reloc, |
struct radeon_bo_list **cs_reloc, |
int nomm); |
int r600_cs_common_vline_parse(struct radeon_cs_parser *p, |
uint32_t *vline_start_end, |
3005,7 → 3072,7 |
#include "radeon_object.h" |
#define DRM_UDELAY(d) udelay(d) |
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 |
resource_size_t |
drm_get_resource_start(struct drm_device *dev, unsigned int resource); |
/drivers/video/drm/radeon/radeon_asic.c |
---|
2294,6 → 2294,14 |
case CHIP_RS780: |
case CHIP_RS880: |
rdev->asic = &rs780_asic; |
/* 760G/780V/880V don't have UVD */ |
if ((rdev->pdev->device == 0x9616)|| |
(rdev->pdev->device == 0x9611)|| |
(rdev->pdev->device == 0x9613)|| |
(rdev->pdev->device == 0x9711)|| |
(rdev->pdev->device == 0x9713)) |
rdev->has_uvd = false; |
else |
rdev->has_uvd = true; |
break; |
case CHIP_RV770: |
/drivers/video/drm/radeon/radeon_asic.h |
---|
81,11 → 81,11 |
int r100_cs_parse(struct radeon_cs_parser *p); |
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg); |
int r100_copy_blit(struct radeon_device *rdev, |
struct radeon_fence *r100_copy_blit(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
struct reservation_object *resv); |
int r100_set_surface_reg(struct radeon_device *rdev, int reg, |
uint32_t tiling_flags, uint32_t pitch, |
uint32_t offset, uint32_t obj_size); |
152,11 → 152,11 |
/* |
* r200,rv250,rs300,rv280 |
*/ |
extern int r200_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *r200_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
struct reservation_object *resv); |
void r200_set_safe_registers(struct radeon_device *rdev); |
/* |
340,12 → 340,14 |
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
int r600_copy_cpdma(struct radeon_device *rdev, |
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, struct radeon_fence **fence); |
int r600_copy_dma(struct radeon_device *rdev, |
unsigned num_gpu_pages, |
struct reservation_object *resv); |
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, struct radeon_fence **fence); |
unsigned num_gpu_pages, |
struct reservation_object *resv); |
void r600_hpd_init(struct radeon_device *rdev); |
void r600_hpd_fini(struct radeon_device *rdev); |
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
389,7 → 391,6 |
void r600_rlc_stop(struct radeon_device *rdev); |
/* r600 audio */ |
int r600_audio_init(struct radeon_device *rdev); |
struct r600_audio_pin r600_audio_status(struct radeon_device *rdev); |
void r600_audio_fini(struct radeon_device *rdev); |
void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock); |
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, |
461,10 → 462,10 |
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); |
void r700_cp_stop(struct radeon_device *rdev); |
void r700_cp_fini(struct radeon_device *rdev); |
int rv770_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
struct reservation_object *resv); |
u32 rv770_get_xclk(struct radeon_device *rdev); |
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); |
int rv770_get_temp(struct radeon_device *rdev); |
535,10 → 536,10 |
struct radeon_fence *fence); |
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, |
struct radeon_ib *ib); |
int evergreen_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
struct reservation_object *resv); |
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); |
void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
int evergreen_get_temp(struct radeon_device *rdev); |
598,7 → 599,8 |
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
int cayman_vm_init(struct radeon_device *rdev); |
void cayman_vm_fini(struct radeon_device *rdev); |
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr); |
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags); |
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
623,7 → 625,8 |
uint32_t incr, uint32_t flags); |
void cayman_dma_vm_pad_ib(struct radeon_ib *ib); |
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr); |
u32 cayman_gfx_get_rptr(struct radeon_device *rdev, |
struct radeon_ring *ring); |
698,12 → 701,13 |
int si_irq_process(struct radeon_device *rdev); |
int si_vm_init(struct radeon_device *rdev); |
void si_vm_fini(struct radeon_device *rdev); |
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr); |
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
int si_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *si_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
struct reservation_object *resv); |
void si_dma_vm_copy_pages(struct radeon_device *rdev, |
struct radeon_ib *ib, |
720,7 → 724,8 |
uint64_t addr, unsigned count, |
uint32_t incr, uint32_t flags); |
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr); |
u32 si_get_xclk(struct radeon_device *rdev); |
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); |
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); |
759,14 → 764,14 |
struct radeon_semaphore *semaphore, |
bool emit_wait); |
void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
int cik_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
int cik_copy_cpdma(struct radeon_device *rdev, |
struct reservation_object *resv); |
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
struct reservation_object *resv); |
int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); |
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); |
792,7 → 797,8 |
int cik_irq_process(struct radeon_device *rdev); |
int cik_vm_init(struct radeon_device *rdev); |
void cik_vm_fini(struct radeon_device *rdev); |
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr); |
void cik_sdma_vm_copy_pages(struct radeon_device *rdev, |
struct radeon_ib *ib, |
810,7 → 816,8 |
uint32_t incr, uint32_t flags); |
void cik_sdma_vm_pad_ib(struct radeon_ib *ib); |
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr); |
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
u32 cik_gfx_get_rptr(struct radeon_device *rdev, |
struct radeon_ring *ring); |
882,6 → 889,7 |
struct radeon_ring *ring); |
void uvd_v1_0_set_wptr(struct radeon_device *rdev, |
struct radeon_ring *ring); |
int uvd_v1_0_resume(struct radeon_device *rdev); |
int uvd_v1_0_init(struct radeon_device *rdev); |
void uvd_v1_0_fini(struct radeon_device *rdev); |
889,6 → 897,8 |
void uvd_v1_0_stop(struct radeon_device *rdev); |
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); |
void uvd_v1_0_fence_emit(struct radeon_device *rdev, |
struct radeon_fence *fence); |
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, |
struct radeon_ring *ring, |
/drivers/video/drm/radeon/radeon_atombios.c |
---|
196,7 → 196,7 |
} |
} |
static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, |
struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev, |
u8 id) |
{ |
struct atom_context *ctx = rdev->mode_info.atom_context; |
221,6 → 221,7 |
if (id == pin->ucGPIO_ID) { |
gpio.id = pin->ucGPIO_ID; |
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; |
gpio.shift = pin->ucGpioPinBitShift; |
gpio.mask = (1 << pin->ucGpioPinBitShift); |
gpio.valid = true; |
break; |
458,7 → 459,7 |
return true; |
} |
const int supported_devices_connector_convert[] = { |
static const int supported_devices_connector_convert[] = { |
DRM_MODE_CONNECTOR_Unknown, |
DRM_MODE_CONNECTOR_VGA, |
DRM_MODE_CONNECTOR_DVII, |
477,7 → 478,7 |
DRM_MODE_CONNECTOR_DisplayPort |
}; |
const uint16_t supported_devices_connector_object_id_convert[] = { |
static const uint16_t supported_devices_connector_object_id_convert[] = { |
CONNECTOR_OBJECT_ID_NONE, |
CONNECTOR_OBJECT_ID_VGA, |
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */ |
494,7 → 495,7 |
CONNECTOR_OBJECT_ID_SVIDEO |
}; |
const int object_connector_convert[] = { |
static const int object_connector_convert[] = { |
DRM_MODE_CONNECTOR_Unknown, |
DRM_MODE_CONNECTOR_DVII, |
DRM_MODE_CONNECTOR_DVII, |
801,7 → 802,7 |
hpd_record = |
(ATOM_HPD_INT_RECORD *) |
record; |
gpio = radeon_lookup_gpio(rdev, |
gpio = radeon_atombios_lookup_gpio(rdev, |
hpd_record->ucHPDIntGPIOID); |
hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); |
hpd.plugged_state = hpd_record->ucPlugged_PinState; |
2128,7 → 2129,7 |
rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
VOLTAGE_GPIO; |
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
radeon_lookup_gpio(rdev, |
radeon_atombios_lookup_gpio(rdev, |
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); |
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) |
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = |
2164,7 → 2165,7 |
rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
VOLTAGE_GPIO; |
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
radeon_lookup_gpio(rdev, |
radeon_atombios_lookup_gpio(rdev, |
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); |
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) |
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = |
2200,7 → 2201,7 |
rdev->pm.power_state[state_index].clock_info[0].voltage.type = |
VOLTAGE_GPIO; |
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = |
radeon_lookup_gpio(rdev, |
radeon_atombios_lookup_gpio(rdev, |
power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); |
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) |
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = |
2248,6 → 2249,14 |
/* add the i2c bus for thermal/fan chip */ |
if (controller->ucType > 0) { |
if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) |
rdev->pm.no_fan = true; |
rdev->pm.fan_pulses_per_revolution = |
controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; |
if (rdev->pm.fan_pulses_per_revolution) { |
rdev->pm.fan_min_rpm = controller->ucFanMinRPM; |
rdev->pm.fan_max_rpm = controller->ucFanMaxRPM; |
} |
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { |
DRM_INFO("Internal thermal controller %s fan control\n", |
(controller->ucFanParameters & |
/drivers/video/drm/radeon/radeon_benchmark.c |
---|
45,33 → 45,29 |
for (i = 0; i < n; i++) { |
switch (flag) { |
case RADEON_BENCHMARK_COPY_DMA: |
r = radeon_copy_dma(rdev, saddr, daddr, |
fence = radeon_copy_dma(rdev, saddr, daddr, |
size / RADEON_GPU_PAGE_SIZE, |
&fence); |
NULL); |
break; |
case RADEON_BENCHMARK_COPY_BLIT: |
r = radeon_copy_blit(rdev, saddr, daddr, |
fence = radeon_copy_blit(rdev, saddr, daddr, |
size / RADEON_GPU_PAGE_SIZE, |
&fence); |
NULL); |
break; |
default: |
DRM_ERROR("Unknown copy method\n"); |
r = -EINVAL; |
return -EINVAL; |
} |
if (r) |
goto exit_do_move; |
if (IS_ERR(fence)) |
return PTR_ERR(fence); |
r = radeon_fence_wait(fence, false); |
radeon_fence_unref(&fence); |
if (r) |
goto exit_do_move; |
radeon_fence_unref(&fence); |
return r; |
} |
end_jiffies = jiffies; |
r = jiffies_to_msecs(end_jiffies - start_jiffies); |
exit_do_move: |
if (fence) |
radeon_fence_unref(&fence); |
return r; |
return jiffies_to_msecs(end_jiffies - start_jiffies); |
} |
100,7 → 96,7 |
ENTER(); |
n = RADEON_BENCHMARK_ITERATIONS; |
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); |
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj); |
if (r) { |
goto out_cleanup; |
} |
112,7 → 108,7 |
if (r) { |
goto out_cleanup; |
} |
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); |
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, NULL, &dobj); |
if (r) { |
goto out_cleanup; |
} |
/drivers/video/drm/radeon/radeon_combios.c |
---|
116,7 → 116,7 |
CONNECTOR_UNSUPPORTED_LEGACY |
}; |
const int legacy_connector_convert[] = { |
static const int legacy_connector_convert[] = { |
DRM_MODE_CONNECTOR_Unknown, |
DRM_MODE_CONNECTOR_DVID, |
DRM_MODE_CONNECTOR_VGA, |
/drivers/video/drm/radeon/radeon_connectors.c |
---|
322,6 → 322,12 |
} |
if (!radeon_connector->edid) { |
/* don't fetch the edid from the vbios if ddc fails and runpm is |
* enabled so we report disconnected. |
*/ |
if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) |
return; |
if (rdev->is_atom_bios) { |
/* some laptops provide a hardcoded edid in rom for LCDs */ |
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || |
826,6 → 832,8 |
static enum drm_connector_status |
radeon_lvds_detect(struct drm_connector *connector, bool force) |
{ |
struct drm_device *dev = connector->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
enum drm_connector_status ret = connector_status_disconnected; |
/drivers/video/drm/radeon/radeon_cs.c |
---|
84,21 → 84,18 |
struct drm_device *ddev = p->rdev->ddev; |
struct radeon_cs_chunk *chunk; |
struct radeon_cs_buckets buckets; |
unsigned i, j; |
bool duplicate; |
unsigned i; |
bool need_mmap_lock = false; |
int r; |
if (p->chunk_relocs_idx == -1) { |
if (p->chunk_relocs == NULL) { |
return 0; |
} |
chunk = &p->chunks[p->chunk_relocs_idx]; |
chunk = p->chunk_relocs; |
p->dma_reloc_idx = 0; |
/* FIXME: we assume that each relocs use 4 dwords */ |
p->nrelocs = chunk->length_dw / 4; |
p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL); |
if (p->relocs_ptr == NULL) { |
return -ENOMEM; |
} |
p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL); |
p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL); |
if (p->relocs == NULL) { |
return -ENOMEM; |
} |
107,31 → 104,17 |
for (i = 0; i < p->nrelocs; i++) { |
struct drm_radeon_cs_reloc *r; |
struct drm_gem_object *gobj; |
unsigned priority; |
duplicate = false; |
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; |
for (j = 0; j < i; j++) { |
if (r->handle == p->relocs[j].handle) { |
p->relocs_ptr[i] = &p->relocs[j]; |
duplicate = true; |
break; |
} |
} |
if (duplicate) { |
p->relocs[i].handle = 0; |
continue; |
} |
p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp, |
r->handle); |
if (p->relocs[i].gobj == NULL) { |
gobj = drm_gem_object_lookup(ddev, p->filp, r->handle); |
if (gobj == NULL) { |
DRM_ERROR("gem object lookup failed 0x%x\n", |
r->handle); |
return -ENOENT; |
} |
p->relocs_ptr[i] = &p->relocs[i]; |
p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj); |
p->relocs[i].robj = gem_to_radeon_bo(gobj); |
/* The userspace buffer priorities are from 0 to 15. A higher |
* number means the buffer is more important. |
143,10 → 126,13 |
+ !!r->write_domain; |
/* the first reloc of an UVD job is the msg and that must be in |
VRAM, also but everything into VRAM on AGP cards to avoid |
image corruptions */ |
VRAM, also put everything into VRAM on AGP cards and older |
IGP chips to avoid image corruptions */ |
if (p->ring == R600_RING_TYPE_UVD_INDEX && |
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) { |
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev) || |
p->rdev->family == CHIP_RS780 || |
p->rdev->family == CHIP_RS880)) { |
/* TODO: is this still needed for NI+ ? */ |
p->relocs[i].prefered_domains = |
RADEON_GEM_DOMAIN_VRAM; |
171,9 → 157,22 |
domain |= RADEON_GEM_DOMAIN_GTT; |
p->relocs[i].allowed_domains = domain; |
} |
/* |
if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { |
uint32_t domain = p->relocs[i].prefered_domains; |
if (!(domain & RADEON_GEM_DOMAIN_GTT)) { |
DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " |
"allowed for userptr BOs\n"); |
return -EINVAL; |
} |
need_mmap_lock = true; |
domain = RADEON_GEM_DOMAIN_GTT; |
p->relocs[i].prefered_domains = domain; |
p->relocs[i].allowed_domains = domain; |
} |
*/ |
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; |
p->relocs[i].handle = r->handle; |
p->relocs[i].tv.shared = !r->write_domain; |
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head, |
priority); |
184,8 → 183,15 |
if (p->cs_flags & RADEON_CS_USE_VM) |
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, |
&p->validated); |
// if (need_mmap_lock) |
// down_read(¤t->mm->mmap_sem); |
return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); |
r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); |
// if (need_mmap_lock) |
// up_read(¤t->mm->mmap_sem); |
return r; |
} |
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) |
231,17 → 237,21 |
return 0; |
} |
static void radeon_cs_sync_rings(struct radeon_cs_parser *p) |
static int radeon_cs_sync_rings(struct radeon_cs_parser *p) |
{ |
int i; |
struct radeon_bo_list *reloc; |
int r; |
for (i = 0; i < p->nrelocs; i++) { |
if (!p->relocs[i].robj) |
continue; |
list_for_each_entry(reloc, &p->validated, tv.head) { |
struct reservation_object *resv; |
radeon_semaphore_sync_to(p->ib.semaphore, |
p->relocs[i].robj->tbo.sync_obj); |
resv = reloc->robj->tbo.resv; |
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv, |
reloc->tv.shared); |
if (r) |
return r; |
} |
return 0; |
} |
/* XXX: note that this is called from the legacy UMS CS ioctl as well */ |
260,13 → 270,11 |
INIT_LIST_HEAD(&p->validated); |
p->idx = 0; |
p->ib.sa_bo = NULL; |
p->ib.semaphore = NULL; |
p->const_ib.sa_bo = NULL; |
p->const_ib.semaphore = NULL; |
p->chunk_ib_idx = -1; |
p->chunk_relocs_idx = -1; |
p->chunk_flags_idx = -1; |
p->chunk_const_ib_idx = -1; |
p->chunk_ib = NULL; |
p->chunk_relocs = NULL; |
p->chunk_flags = NULL; |
p->chunk_const_ib = NULL; |
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL); |
if (p->chunks_array == NULL) { |
return -ENOMEM; |
293,24 → 301,23 |
return -EFAULT; |
} |
p->chunks[i].length_dw = user_chunk.length_dw; |
p->chunks[i].chunk_id = user_chunk.chunk_id; |
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { |
p->chunk_relocs_idx = i; |
if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) { |
p->chunk_relocs = &p->chunks[i]; |
} |
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { |
p->chunk_ib_idx = i; |
if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) { |
p->chunk_ib = &p->chunks[i]; |
/* zero length IB isn't useful */ |
if (p->chunks[i].length_dw == 0) |
return -EINVAL; |
} |
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) { |
p->chunk_const_ib_idx = i; |
if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) { |
p->chunk_const_ib = &p->chunks[i]; |
/* zero length CONST IB isn't useful */ |
if (p->chunks[i].length_dw == 0) |
return -EINVAL; |
} |
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { |
p->chunk_flags_idx = i; |
if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) { |
p->chunk_flags = &p->chunks[i]; |
/* zero length flags aren't useful */ |
if (p->chunks[i].length_dw == 0) |
return -EINVAL; |
319,10 → 326,10 |
size = p->chunks[i].length_dw; |
cdata = (void __user *)(unsigned long)user_chunk.chunk_data; |
p->chunks[i].user_ptr = cdata; |
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) |
if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) |
continue; |
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { |
if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) { |
if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP)) |
continue; |
} |
335,7 → 342,7 |
if (copy_from_user(p->chunks[i].kdata, cdata, size)) { |
return -EFAULT; |
} |
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { |
if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) { |
p->cs_flags = p->chunks[i].kdata[0]; |
if (p->chunks[i].length_dw > 1) |
ring = p->chunks[i].kdata[1]; |
376,8 → 383,8 |
static int cmp_size_smaller_first(void *priv, struct list_head *a, |
struct list_head *b) |
{ |
struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head); |
struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head); |
struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head); |
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head); |
/* Sort A before B if A is smaller. */ |
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; |
410,7 → 417,7 |
ttm_eu_fence_buffer_objects(&parser->ticket, |
&parser->validated, |
parser->ib.fence); |
&parser->ib.fence->base); |
} else if (backoff) { |
ttm_eu_backoff_reservation(&parser->ticket, |
&parser->validated); |
418,14 → 425,16 |
if (parser->relocs != NULL) { |
for (i = 0; i < parser->nrelocs; i++) { |
if (parser->relocs[i].gobj) |
drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); |
struct radeon_bo *bo = parser->relocs[i].robj; |
if (bo == NULL) |
continue; |
drm_gem_object_unreference_unlocked(&bo->gem_base); |
} |
} |
kfree(parser->track); |
kfree(parser->relocs); |
kfree(parser->relocs_ptr); |
kfree(parser->vm_bos); |
drm_free_large(parser->vm_bos); |
for (i = 0; i < parser->nchunks; i++) |
drm_free_large(parser->chunks[i].kdata); |
kfree(parser->chunks); |
439,7 → 448,7 |
{ |
int r; |
if (parser->chunk_ib_idx == -1) |
if (parser->chunk_ib == NULL) |
return 0; |
if (parser->cs_flags & RADEON_CS_USE_VM) |
451,6 → 460,13 |
return r; |
} |
r = radeon_cs_sync_rings(parser); |
if (r) { |
if (r != -ERESTARTSYS) |
DRM_ERROR("Failed to sync rings: %i\n", r); |
return r; |
} |
if (parser->ring == R600_RING_TYPE_UVD_INDEX) |
radeon_uvd_note_usage(rdev); |
else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) || |
457,7 → 473,6 |
(parser->ring == TN_RING_TYPE_VCE2_INDEX)) |
radeon_vce_note_usage(rdev); |
radeon_cs_sync_rings(parser); |
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); |
if (r) { |
DRM_ERROR("Failed to schedule IB !\n"); |
493,10 → 508,6 |
for (i = 0; i < p->nrelocs; i++) { |
struct radeon_bo *bo; |
/* ignore duplicates */ |
if (p->relocs_ptr[i] != &p->relocs[i]) |
continue; |
bo = p->relocs[i].robj; |
bo_va = radeon_vm_bo_find(vm, bo); |
if (bo_va == NULL) { |
507,6 → 518,8 |
r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem); |
if (r) |
return r; |
radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update); |
} |
return radeon_vm_clear_invalids(rdev, vm); |
519,7 → 532,7 |
struct radeon_vm *vm = &fpriv->vm; |
int r; |
if (parser->chunk_ib_idx == -1) |
if (parser->chunk_ib == NULL) |
return 0; |
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) |
return 0; |
544,11 → 557,16 |
if (r) { |
goto out; |
} |
radeon_cs_sync_rings(parser); |
radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); |
r = radeon_cs_sync_rings(parser); |
if (r) { |
if (r != -ERESTARTSYS) |
DRM_ERROR("Failed to sync rings: %i\n", r); |
goto out; |
} |
if ((rdev->family >= CHIP_TAHITI) && |
(parser->chunk_const_ib_idx != -1)) { |
(parser->chunk_const_ib != NULL)) { |
r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true); |
} else { |
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); |
575,7 → 593,7 |
struct radeon_vm *vm = NULL; |
int r; |
if (parser->chunk_ib_idx == -1) |
if (parser->chunk_ib == NULL) |
return 0; |
if (parser->cs_flags & RADEON_CS_USE_VM) { |
583,8 → 601,8 |
vm = &fpriv->vm; |
if ((rdev->family >= CHIP_TAHITI) && |
(parser->chunk_const_ib_idx != -1)) { |
ib_chunk = &parser->chunks[parser->chunk_const_ib_idx]; |
(parser->chunk_const_ib != NULL)) { |
ib_chunk = parser->chunk_const_ib; |
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { |
DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw); |
return -EINVAL; |
603,13 → 621,13 |
return -EFAULT; |
} |
ib_chunk = &parser->chunks[parser->chunk_ib_idx]; |
ib_chunk = parser->chunk_ib; |
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { |
DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw); |
return -EINVAL; |
} |
} |
ib_chunk = &parser->chunks[parser->chunk_ib_idx]; |
ib_chunk = parser->chunk_ib; |
r = radeon_ib_get(rdev, parser->ring, &parser->ib, |
vm, ib_chunk->length_dw * 4); |
694,7 → 712,7 |
struct radeon_cs_packet *pkt, |
unsigned idx) |
{ |
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; |
struct radeon_cs_chunk *ib_chunk = p->chunk_ib; |
struct radeon_device *rdev = p->rdev; |
uint32_t header; |
788,7 → 806,7 |
* GPU offset using the provided start. |
**/ |
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, |
struct radeon_cs_reloc **cs_reloc, |
struct radeon_bo_list **cs_reloc, |
int nomm) |
{ |
struct radeon_cs_chunk *relocs_chunk; |
796,12 → 814,12 |
unsigned idx; |
int r; |
if (p->chunk_relocs_idx == -1) { |
if (p->chunk_relocs == NULL) { |
DRM_ERROR("No relocation chunk !\n"); |
return -EINVAL; |
} |
*cs_reloc = NULL; |
relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
relocs_chunk = p->chunk_relocs; |
r = radeon_cs_packet_parse(p, &p3reloc, p->idx); |
if (r) |
return r; |
827,6 → 845,6 |
(u64)relocs_chunk->kdata[idx + 3] << 32; |
(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0]; |
} else |
*cs_reloc = p->relocs_ptr[(idx / 4)]; |
*cs_reloc = &p->relocs[(idx / 4)]; |
return 0; |
} |
/drivers/video/drm/radeon/radeon_device.c |
---|
82,7 → 82,7 |
int init_display(struct radeon_device *rdev, videomode_t *mode); |
int init_display_kms(struct drm_device *dev, videomode_t *usermode); |
int get_modes(videomode_t *mode, u32_t *count); |
int get_modes(videomode_t *mode, u32 *count); |
int set_user_mode(videomode_t *mode); |
int r100_2D_test(struct radeon_device *rdev); |
437,6 → 437,37 |
__clear_bit(doorbell, rdev->doorbell.used); |
} |
/** |
* radeon_doorbell_get_kfd_info - Report doorbell configuration required to |
* setup KFD |
* |
* @rdev: radeon_device pointer |
* @aperture_base: output returning doorbell aperture base physical address |
* @aperture_size: output returning doorbell aperture size in bytes |
* @start_offset: output returning # of doorbell bytes reserved for radeon. |
* |
* Radeon and the KFD share the doorbell aperture. Radeon sets it up, |
* takes doorbells required for its own rings and reports the setup to KFD. |
* Radeon reserved doorbells are at the start of the doorbell aperture. |
*/ |
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev, |
phys_addr_t *aperture_base, |
size_t *aperture_size, |
size_t *start_offset) |
{ |
/* The first num_doorbells are used by radeon. |
* KFD takes whatever's left in the aperture. */ |
if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) { |
*aperture_base = rdev->doorbell.base; |
*aperture_size = rdev->doorbell.size; |
*start_offset = rdev->doorbell.num_doorbells * sizeof(u32); |
} else { |
*aperture_base = 0; |
*aperture_size = 0; |
*start_offset = 0; |
} |
} |
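A minimal consumer-side sketch of this interface (illustrative only: the |
function below and its name are assumptions, not APIs defined in this |
driver): |
static int example_kfd_takeover(struct radeon_device *rdev) |
{ |
	phys_addr_t base; |
	size_t size, reserved; |
	radeon_doorbell_get_kfd_info(rdev, &base, &size, &reserved); |
	if (size == 0) |
		return -ENOSPC; /* radeon kept the whole aperture */ |
	/* KFD may only use doorbells past the radeon-reserved prefix, |
	 * i.e. the range [base + reserved, base + size) */ |
	return 0; |
} |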
/* |
* radeon_wb_*() |
* Writeback is the method by which the GPU updates special pages |
494,7 → 525,7 |
if (rdev->wb.wb_obj == NULL) { |
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_GTT, 0, NULL, |
RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, |
&rdev->wb.wb_obj); |
if (r) { |
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); |
998,6 → 1029,7 |
} |
mutex_init(&rdev->mode_info.atom_context->mutex); |
mutex_init(&rdev->mode_info.atom_context->scratch_mutex); |
radeon_atom_initialize_bios_scratch_regs(rdev->ddev); |
atom_allocate_fb_scratch(rdev->mode_info.atom_context); |
return 0; |
1234,6 → 1266,7 |
for (i = 0; i < RADEON_NUM_RINGS; i++) { |
rdev->ring[i].idx = i; |
} |
rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); |
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", |
radeon_family_name[rdev->family], pdev->vendor, pdev->device, |
1248,9 → 1281,13 |
mutex_init(&rdev->pm.mutex); |
mutex_init(&rdev->gpu_clock_mutex); |
mutex_init(&rdev->srbm_mutex); |
mutex_init(&rdev->grbm_idx_mutex); |
// init_rwsem(&rdev->pm.mclk_lock); |
// init_rwsem(&rdev->exclusive_lock); |
init_waitqueue_head(&rdev->irq.vblank_queue); |
mutex_init(&rdev->mn_lock); |
// hash_init(rdev->mn_hash); |
r = radeon_gem_init(rdev); |
if (r) |
return r; |
1362,9 → 1399,6 |
if (r) |
return r; |
r = radeon_ib_ring_tests(rdev); |
if (r) |
DRM_ERROR("ib ring test failed (%d).\n", r); |
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { |
1379,6 → 1413,10 |
return r; |
} |
// r = radeon_ib_ring_tests(rdev); |
// if (r) |
// DRM_ERROR("ib ring test failed (%d).\n", r); |
if ((radeon_testing & 1)) { |
if (rdev->accel_working) |
radeon_test_moves(rdev); |
1436,7 → 1474,6 |
} |
} |
retry: |
r = radeon_asic_reset(rdev); |
if (!r) { |
dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n"); |
1445,25 → 1482,12 |
radeon_restore_bios_scratch_regs(rdev); |
if (!r) { |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!r && ring_data[i]) { |
radeon_ring_restore(rdev, &rdev->ring[i], |
ring_sizes[i], ring_data[i]); |
ring_sizes[i] = 0; |
ring_data[i] = NULL; |
} |
// r = radeon_ib_ring_tests(rdev); |
// if (r) { |
// dev_err(rdev->dev, "ib ring test failed (%d).\n", r); |
// if (saved) { |
// saved = false; |
// radeon_suspend(rdev); |
// goto retry; |
// } |
// } |
} else { |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
radeon_fence_driver_force_completion(rdev, i); |
kfree(ring_data[i]); |
} |
} |
/drivers/video/drm/radeon/radeon_display.c |
---|
1537,7 → 1537,7 |
/* In vblank? */ |
if (in_vbl) |
ret |= DRM_SCANOUTPOS_INVBL; |
ret |= DRM_SCANOUTPOS_IN_VBLANK; |
/* Is vpos outside nominal vblank area, but less than |
* 1/100 of a frame height away from start of vblank? |
/drivers/video/drm/radeon/radeon_encoders.c |
---|
179,6 → 179,9 |
(rdev->pdev->subsystem_vendor == 0x1734) && |
(rdev->pdev->subsystem_device == 0x1107)) |
use_bl = false; |
/* disable native backlight control on older asics */ |
else if (rdev->family < CHIP_R600) |
use_bl = false; |
else |
use_bl = true; |
} |
410,3 → 413,24 |
} |
} |
bool radeon_encoder_is_digital(struct drm_encoder *encoder) |
{ |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
switch (radeon_encoder->encoder_id) { |
case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
case ENCODER_OBJECT_ID_INTERNAL_LVTM1: |
case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
case ENCODER_OBJECT_ID_INTERNAL_DDI: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
return true; |
default: |
return false; |
} |
} |
/drivers/video/drm/radeon/radeon_fb.c |
---|
184,7 → 184,8 |
static int radeonfb_create(struct drm_fb_helper *helper, |
struct drm_fb_helper_surface_size *sizes) |
{ |
struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; |
struct radeon_fbdev *rfbdev = |
container_of(helper, struct radeon_fbdev, helper); |
struct radeon_device *rdev = rfbdev->rdev; |
struct fb_info *info; |
struct drm_framebuffer *fb = NULL; |
/drivers/video/drm/radeon/radeon_fence.c |
---|
29,9 → 29,8 |
* Dave Airlie |
*/ |
#include <linux/seq_file.h> |
#include <asm/atomic.h> |
#include <linux/atomic.h> |
#include <linux/wait.h> |
#include <linux/list.h> |
#include <linux/kref.h> |
#include <linux/slab.h> |
#include <drm/drmP.h> |
111,15 → 110,19 |
struct radeon_fence **fence, |
int ring) |
{ |
u64 seq = ++rdev->fence_drv[ring].sync_seq[ring]; |
/* we are protected by the ring emission mutex */ |
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL); |
if ((*fence) == NULL) { |
return -ENOMEM; |
} |
kref_init(&((*fence)->kref)); |
(*fence)->rdev = rdev; |
(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring]; |
(*fence)->seq = seq; |
(*fence)->ring = ring; |
(*fence)->is_vm_update = false; |
fence_init(&(*fence)->base, &radeon_fence_ops, |
&rdev->fence_queue.lock, rdev->fence_context + ring, seq); |
radeon_fence_ring_emit(rdev, ring, *fence); |
trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq); |
return 0; |
126,15 → 129,51 |
} |
/** |
* radeon_fence_process - process a fence |
* radeon_fence_check_signaled - callback from fence_queue |
* |
* this function is called with fence_queue lock held, which is also used |
* for the fence locking itself, so unlocked variants are used for |
* fence_signal, and remove_wait_queue. |
*/ |
static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key) |
{ |
struct radeon_fence *fence; |
u64 seq; |
fence = container_of(wait, struct radeon_fence, fence_wake); |
/* |
* We cannot use radeon_fence_process here because we're already |
* in the waitqueue, in a call from wake_up_all. |
*/ |
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq); |
if (seq >= fence->seq) { |
int ret = fence_signal_locked(&fence->base); |
if (!ret) |
FENCE_TRACE(&fence->base, "signaled from irq context\n"); |
else |
FENCE_TRACE(&fence->base, "was already signaled\n"); |
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring); |
// __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake); |
fence_put(&fence->base); |
} else |
FENCE_TRACE(&fence->base, "pending\n"); |
return 0; |
} |
/** |
* radeon_fence_activity - check for fence activity |
* |
* @rdev: radeon_device pointer |
* @ring: ring index the fence is associated with |
* |
* Checks the current fence value and wakes the fence queue |
* if the sequence number has increased (all asics). |
* Checks the current fence value and calculates the last |
* signalled fence value. Returns true if activity occurred |
* on the ring, and the fence_queue should be woken up. |
*/ |
void radeon_fence_process(struct radeon_device *rdev, int ring) |
static bool radeon_fence_activity(struct radeon_device *rdev, int ring) |
{ |
uint64_t seq, last_seq, last_emitted; |
unsigned count_loop = 0; |
190,26 → 229,80 |
} |
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq); |
if (wake) |
wake_up_all(&rdev->fence_queue); |
// if (seq < last_emitted) |
// radeon_fence_schedule_check(rdev, ring); |
return wake; |
} |
/** |
* radeon_fence_destroy - destroy a fence |
* radeon_fence_check_lockup - check for hardware lockup |
* |
* @kref: fence kref |
* @work: delayed work item |
* |
* Frees the fence object (all asics). |
* Checks for fence activity and, if there is none, probes |
* the hardware to see if a lockup occurred. |
*/ |
static void radeon_fence_destroy(struct kref *kref) |
static void radeon_fence_check_lockup(struct work_struct *work) |
{ |
struct radeon_fence *fence; |
struct radeon_fence_driver *fence_drv; |
struct radeon_device *rdev; |
int ring; |
fence = container_of(kref, struct radeon_fence, kref); |
kfree(fence); |
fence_drv = container_of(work, struct radeon_fence_driver, |
lockup_work.work); |
rdev = fence_drv->rdev; |
ring = fence_drv - &rdev->fence_drv[0]; |
// if (!down_read_trylock(&rdev->exclusive_lock)) { |
// /* just reschedule the check if a reset is going on */ |
// radeon_fence_schedule_check(rdev, ring); |
// return; |
// } |
if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) { |
unsigned long irqflags; |
fence_drv->delayed_irq = false; |
spin_lock_irqsave(&rdev->irq.lock, irqflags); |
radeon_irq_set(rdev); |
spin_unlock_irqrestore(&rdev->irq.lock, irqflags); |
} |
if (radeon_fence_activity(rdev, ring)) |
wake_up_all(&rdev->fence_queue); |
else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) { |
/* good news we believe it's a lockup */ |
dev_warn(rdev->dev, "GPU lockup (current fence id " |
"0x%016llx last fence id 0x%016llx on ring %d)\n", |
(uint64_t)atomic64_read(&fence_drv->last_seq), |
fence_drv->sync_seq[ring], ring); |
/* remember that we need a reset */ |
rdev->needs_reset = true; |
wake_up_all(&rdev->fence_queue); |
} |
// up_read(&rdev->exclusive_lock); |
} |
/** |
* radeon_fence_process - process a fence |
* |
* @rdev: radeon_device pointer |
* @ring: ring index the fence is associated with |
* |
* Checks the current fence value and wakes the fence queue |
* if the sequence number has increased (all asics). |
*/ |
void radeon_fence_process(struct radeon_device *rdev, int ring) |
{ |
if (radeon_fence_activity(rdev, ring)) |
wake_up_all(&rdev->fence_queue); |
} |
/** |
* radeon_fence_seq_signaled - check if a fence sequence number has signaled |
* |
* @rdev: radeon device pointer |
237,7 → 330,78 |
return false; |
} |
static bool radeon_fence_is_signaled(struct fence *f) |
{ |
struct radeon_fence *fence = to_radeon_fence(f); |
struct radeon_device *rdev = fence->rdev; |
unsigned ring = fence->ring; |
u64 seq = fence->seq; |
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { |
return true; |
} |
// if (down_read_trylock(&rdev->exclusive_lock)) |
{ |
radeon_fence_process(rdev, ring); |
// up_read(&rdev->exclusive_lock); |
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { |
return true; |
} |
} |
return false; |
} |
/** |
* radeon_fence_enable_signaling - enable signalling on fence |
* @fence: fence |
* |
* This function is called with fence_queue lock held, and adds a callback |
* to fence_queue that checks if this fence is signaled, and if so it |
* signals the fence and removes itself. |
*/ |
static bool radeon_fence_enable_signaling(struct fence *f) |
{ |
struct radeon_fence *fence = to_radeon_fence(f); |
struct radeon_device *rdev = fence->rdev; |
if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) |
return false; |
// if (down_read_trylock(&rdev->exclusive_lock)) |
{ |
radeon_irq_kms_sw_irq_get(rdev, fence->ring); |
// if (radeon_fence_activity(rdev, fence->ring)) |
// wake_up_all_locked(&rdev->fence_queue); |
/* did fence get signaled after we enabled the sw irq? */ |
if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) { |
radeon_irq_kms_sw_irq_put(rdev, fence->ring); |
// up_read(&rdev->exclusive_lock); |
return false; |
} |
// up_read(&rdev->exclusive_lock); |
// } else { |
/* we're probably in a lockup, let's not fiddle too much */ |
// if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring)) |
// rdev->fence_drv[fence->ring].delayed_irq = true; |
// radeon_fence_schedule_check(rdev, fence->ring); |
} |
// fence->fence_wake.flags = 0; |
// fence->fence_wake.private = NULL; |
fence->fence_wake.func = radeon_fence_check_signaled; |
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake); |
fence_get(f); |
FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring); |
return true; |
} |
/** |
* radeon_fence_signaled - check if a fence has signaled |
* |
* @fence: radeon fence object |
247,14 → 411,15 |
*/ |
bool radeon_fence_signaled(struct radeon_fence *fence) |
{ |
if (!fence) { |
if (!fence) |
return true; |
} |
if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) { |
return true; |
} |
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) { |
fence->seq = RADEON_FENCE_SIGNALED_SEQ; |
int ret; |
ret = fence_signal(&fence->base); |
if (!ret) |
FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n"); |
return true; |
} |
return false; |
283,11 → 448,12 |
} |
/** |
* radeon_fence_wait_seq - wait for a specific sequence numbers |
* radeon_fence_wait_seq_timeout - wait for a specific sequence numbers |
* |
* @rdev: radeon device pointer |
* @target_seq: sequence number(s) we want to wait for |
* @intr: use interruptable sleep |
* @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait |
* |
* Wait for the requested sequence number(s) to be written by any ring |
* (all asics). Sequence number array is indexed by ring id. |
294,24 → 460,25 |
* @intr selects whether to use interruptible (true) or non-interruptible |
* (false) sleep when waiting for the sequence number. Helper function |
* for radeon_fence_wait_*(). |
* Returns 0 if the sequence number has passed, error for all other cases. |
* Returns remaining time if the sequence number has passed, 0 when |
* the wait timed out, or an error for all other cases. |
* -EDEADLK is returned when a GPU lockup has been detected. |
*/ |
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq, |
bool intr) |
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev, |
u64 *target_seq, bool intr, |
long timeout) |
{ |
uint64_t last_seq[RADEON_NUM_RINGS]; |
bool signaled; |
int i, r; |
long r; |
int i; |
while (!radeon_fence_any_seq_signaled(rdev, target_seq)) { |
if (radeon_fence_any_seq_signaled(rdev, target_seq)) |
return timeout; |
/* Save current sequence values, used to check for GPU lockups */ |
/* enable IRQs and tracing */ |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq); |
trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); |
radeon_irq_kms_sw_irq_get(rdev, i); |
} |
318,14 → 485,17 |
if (intr) { |
r = wait_event_interruptible_timeout(rdev->fence_queue, ( |
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)) |
|| rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT); |
radeon_fence_any_seq_signaled(rdev, target_seq) |
|| rdev->needs_reset), timeout); |
} else { |
r = wait_event_timeout(rdev->fence_queue, ( |
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)) |
|| rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT); |
radeon_fence_any_seq_signaled(rdev, target_seq) |
|| rdev->needs_reset), timeout); |
} |
if (rdev->needs_reset) |
r = -EDEADLK; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
334,59 → 504,14 |
trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); |
} |
if (unlikely(r < 0)) |
return r; |
if (unlikely(!signaled)) { |
if (rdev->needs_reset) |
return -EDEADLK; |
/* we were interrupted for some reason and fence |
* isn't signaled yet, resume waiting */ |
if (r) |
continue; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq)) |
break; |
} |
if (i != RADEON_NUM_RINGS) |
continue; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i])) |
break; |
} |
if (i < RADEON_NUM_RINGS) { |
/* good news we believe it's a lockup */ |
dev_warn(rdev->dev, "GPU lockup (waiting for " |
"0x%016llx last fence id 0x%016llx on" |
" ring %d)\n", |
target_seq[i], last_seq[i], i); |
/* remember that we need an reset */ |
rdev->needs_reset = true; |
wake_up_all(&rdev->fence_queue); |
return -EDEADLK; |
} |
} |
} |
return 0; |
} |
/** |
* radeon_fence_wait - wait for a fence to signal |
* |
* @fence: radeon fence object |
* @intr: use interruptable sleep |
* @intr: use interruptible sleep |
* |
* Wait for the requested fence to signal (all asics). |
* @intr selects whether to use interruptible (true) or non-interruptible |
396,22 → 521,26 |
int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
{ |
uint64_t seq[RADEON_NUM_RINGS] = {}; |
int r; |
long r; |
if (fence == NULL) { |
WARN(1, "Querying an invalid fence : %p !\n", fence); |
return -EINVAL; |
} |
/* |
* This function should not be called on !radeon fences. |
* If this is the case, it would mean this function can |
* also be called on radeon fences belonging to another card. |
* exclusive_lock is not held in that case. |
*/ |
if (WARN_ON_ONCE(!to_radeon_fence(&fence->base))) |
return fence_wait(&fence->base, intr); |
seq[fence->ring] = fence->seq; |
if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ) |
return 0; |
r = radeon_fence_wait_seq(fence->rdev, seq, intr); |
if (r) |
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); |
if (r < 0) { |
return r; |
} |
fence->seq = RADEON_FENCE_SIGNALED_SEQ; |
r = fence_signal(&fence->base); |
if (!r) |
FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); |
return 0; |
} |
434,7 → 563,7 |
{ |
uint64_t seq[RADEON_NUM_RINGS]; |
unsigned i, num_rings = 0; |
int r; |
long r; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
seq[i] = 0; |
445,10 → 574,6 |
seq[i] = fences[i]->seq; |
++num_rings; |
/* test if something was already signaled */ |
if (seq[i] == RADEON_FENCE_SIGNALED_SEQ) |
return 0; |
} |
/* nothing to wait for ? */ |
455,8 → 580,8 |
if (num_rings == 0) |
return -ENOENT; |
r = radeon_fence_wait_seq(rdev, seq, intr); |
if (r) { |
r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); |
if (r < 0) { |
return r; |
} |
return 0; |
475,6 → 600,7 |
int radeon_fence_wait_next(struct radeon_device *rdev, int ring) |
{ |
uint64_t seq[RADEON_NUM_RINGS] = {}; |
long r; |
seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL; |
if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) { |
482,7 → 608,10 |
already the last emitted fence */ |
return -ENOENT; |
} |
return radeon_fence_wait_seq(rdev, seq, false); |
r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT); |
if (r < 0) |
return r; |
return 0; |
} |
/** |
498,18 → 627,18 |
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring) |
{ |
uint64_t seq[RADEON_NUM_RINGS] = {}; |
int r; |
long r; |
seq[ring] = rdev->fence_drv[ring].sync_seq[ring]; |
if (!seq[ring]) |
return 0; |
r = radeon_fence_wait_seq(rdev, seq, false); |
if (r) { |
r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT); |
if (r < 0) { |
if (r == -EDEADLK) |
return -EDEADLK; |
dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", |
dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n", |
ring, r); |
} |
return 0; |
525,7 → 654,7 |
*/ |
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence) |
{ |
kref_get(&fence->kref); |
fence_get(&fence->base); |
return fence; |
} |
542,7 → 671,7 |
*fence = NULL; |
if (tmp) { |
kref_put(&tmp->kref, radeon_fence_destroy); |
fence_put(&tmp->base); |
} |
} |
711,6 → 840,9 |
rdev->fence_drv[ring].sync_seq[i] = 0; |
atomic64_set(&rdev->fence_drv[ring].last_seq, 0); |
rdev->fence_drv[ring].initialized = false; |
INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work, |
radeon_fence_check_lockup); |
rdev->fence_drv[ring].rdev = rdev; |
} |
/** |
758,7 → 890,7 |
r = radeon_fence_wait_empty(rdev, ring); |
if (r) { |
/* no need to trigger GPU reset as we are unloading */ |
radeon_fence_driver_force_completion(rdev); |
radeon_fence_driver_force_completion(rdev, ring); |
} |
wake_up_all(&rdev->fence_queue); |
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); |
771,17 → 903,14 |
* radeon_fence_driver_force_completion - force all fence waiter to complete |
* |
* @rdev: radeon device pointer |
* @ring: the ring to complete |
* |
* In case of GPU reset failure, make sure no process keeps waiting on a |
* fence that will never complete. |
*/ |
void radeon_fence_driver_force_completion(struct radeon_device *rdev) |
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring) |
{ |
int ring; |
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { |
if (!rdev->fence_drv[ring].initialized) |
continue; |
if (rdev->fence_drv[ring].initialized) { |
radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); |
} |
} |
833,6 → 962,7 |
down_read(&rdev->exclusive_lock); |
seq_printf(m, "%d\n", rdev->needs_reset); |
rdev->needs_reset = true; |
wake_up_all(&rdev->fence_queue); |
up_read(&rdev->exclusive_lock); |
return 0; |
852,3 → 982,72 |
return 0; |
#endif |
} |
static const char *radeon_fence_get_driver_name(struct fence *fence) |
{ |
return "radeon"; |
} |
static const char *radeon_fence_get_timeline_name(struct fence *f) |
{ |
struct radeon_fence *fence = to_radeon_fence(f); |
switch (fence->ring) { |
case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx"; |
case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1"; |
case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2"; |
case R600_RING_TYPE_DMA_INDEX: return "radeon.dma"; |
case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1"; |
case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd"; |
case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1"; |
case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2"; |
default: WARN_ON_ONCE(1); return "radeon.unk"; |
} |
} |
static inline bool radeon_test_signaled(struct radeon_fence *fence) |
{ |
return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); |
} |
static signed long radeon_fence_default_wait(struct fence *f, bool intr, |
signed long t) |
{ |
struct radeon_fence *fence = to_radeon_fence(f); |
struct radeon_device *rdev = fence->rdev; |
bool signaled; |
fence_enable_sw_signaling(&fence->base); |
/* |
* This function has to return -EDEADLK, but cannot hold |
* exclusive_lock during the wait because some callers |
* may already hold it. This means checking needs_reset without |
* lock, and not fiddling with any gpu internals. |
* |
* The callback installed with fence_enable_sw_signaling will |
* run before our wait_event_*timeout call, so we will see |
* both the signaled fence and the changes to needs_reset. |
*/ |
if (intr) |
t = wait_event_interruptible_timeout(rdev->fence_queue, |
((signaled = radeon_test_signaled(fence)) || |
rdev->needs_reset), t); |
else |
t = wait_event_timeout(rdev->fence_queue, |
((signaled = radeon_test_signaled(fence)) || |
rdev->needs_reset), t); |
if (t > 0 && !signaled) |
return -EDEADLK; |
return t; |
} |
const struct fence_ops radeon_fence_ops = { |
.get_driver_name = radeon_fence_get_driver_name, |
.get_timeline_name = radeon_fence_get_timeline_name, |
.enable_signaling = radeon_fence_enable_signaling, |
.signaled = radeon_fence_is_signaled, |
.wait = radeon_fence_default_wait, |
.release = NULL, |
}; |
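These ops are bound to every fence at emission time; the radeon_fence_emit() |
hunk above shows the wiring, repeated here for reference: |
	fence_init(&(*fence)->base, &radeon_fence_ops, |
		   &rdev->fence_queue.lock, rdev->fence_context + ring, seq); |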
/drivers/video/drm/radeon/radeon_gart.c |
---|
137,7 → 137,7 |
if (rdev->gart.robj == NULL) { |
r = radeon_bo_create(rdev, rdev->gart.table_size, |
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
0, NULL, &rdev->gart.robj); |
0, NULL, NULL, &rdev->gart.robj); |
if (r) { |
return r; |
} |
/drivers/video/drm/radeon/radeon_gem.c |
---|
65,7 → 65,7 |
retry: |
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, |
flags, NULL, &robj); |
flags, NULL, NULL, &robj); |
if (r) { |
if (r != -ERESTARTSYS) { |
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { |
91,7 → 91,7 |
{ |
struct radeon_bo *robj; |
uint32_t domain; |
int r; |
long r; |
/* FIXME: reimplement */ |
robj = gem_to_radeon_bo(gobj); |
229,9 → 229,10 |
return r; |
} |
int radeon_mode_dumb_mmap(struct drm_file *filp, |
static int radeon_mode_mmap(struct drm_file *filp, |
struct drm_device *dev, |
uint32_t handle, uint64_t *offset_p) |
uint32_t handle, bool dumb, |
uint64_t *offset_p) |
{ |
struct drm_gem_object *gobj; |
struct radeon_bo *robj; |
240,6 → 241,14 |
if (gobj == NULL) { |
return -ENOENT; |
} |
/* |
* We don't allow dumb mmaps on objects created using another |
* interface. |
*/ |
WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach), |
"Illegal dumb map of GPU buffer.\n"); |
robj = gem_to_radeon_bo(gobj); |
*offset_p = radeon_bo_mmap_offset(robj); |
drm_gem_object_unreference_unlocked(gobj); |
251,7 → 260,8 |
{ |
struct drm_radeon_gem_mmap *args = data; |
return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); |
return radeon_mode_mmap(filp, dev, args->handle, false, |
&args->addr_ptr); |
} |
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
283,8 → 293,9 |
struct drm_radeon_gem_wait_idle *args = data; |
struct drm_gem_object *gobj; |
struct radeon_bo *robj; |
int r; |
int r = 0; |
uint32_t cur_placement = 0; |
long ret; |
gobj = drm_gem_object_lookup(dev, filp, args->handle); |
if (gobj == NULL) { |
/drivers/video/drm/radeon/radeon_ib.c |
---|
64,10 → 64,7 |
return r; |
} |
r = radeon_semaphore_create(rdev, &ib->semaphore); |
if (r) { |
return r; |
} |
radeon_sync_create(&ib->sync); |
ib->ring = ring; |
ib->fence = NULL; |
96,7 → 93,7 |
*/ |
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
radeon_semaphore_free(rdev, &ib->semaphore, ib->fence); |
radeon_sync_free(rdev, &ib->sync, ib->fence); |
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence); |
radeon_fence_unref(&ib->fence); |
} |
145,11 → 142,11 |
if (ib->vm) { |
struct radeon_fence *vm_id_fence; |
vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); |
radeon_semaphore_sync_to(ib->semaphore, vm_id_fence); |
radeon_sync_fence(&ib->sync, vm_id_fence); |
} |
/* sync with other rings */ |
r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); |
r = radeon_sync_rings(rdev, &ib->sync, ib->ring); |
if (r) { |
dev_err(rdev->dev, "failed to sync rings (%d)\n", r); |
radeon_ring_unlock_undo(rdev, ring); |
157,11 → 154,12 |
} |
if (ib->vm) |
radeon_vm_flush(rdev, ib->vm, ib->ring); |
radeon_vm_flush(rdev, ib->vm, ib->ring, |
ib->sync.last_vm_update); |
if (const_ib) { |
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); |
radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); |
radeon_sync_free(rdev, &const_ib->sync, NULL); |
} |
radeon_ring_ib_execute(rdev, ib->ring, ib); |
r = radeon_fence_emit(rdev, &ib->fence, ib->ring); |
269,6 → 267,7 |
r = radeon_ib_test(rdev, i, ring); |
if (r) { |
radeon_fence_driver_force_completion(rdev, i); |
ring->ready = false; |
rdev->needs_reset = false; |
/drivers/video/drm/radeon/radeon_irq_kms.c |
---|
206,6 → 206,21 |
} |
/** |
* radeon_irq_kms_sw_irq_get_delayed - enable software interrupt |
* |
* @rdev: radeon device pointer |
* @ring: ring whose interrupt you want to enable |
* |
* Enables the software interrupt for a specific ring (all asics). |
* The software interrupt is generally used to signal a fence on |
* a particular ring. |
*/ |
bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring) |
{ |
return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1; |
} |
/** |
* radeon_irq_kms_sw_irq_put - disable software interrupt |
* |
* @rdev: radeon device pointer |
/drivers/video/drm/radeon/radeon_kfd.h |
---|
0,0 → 1,47 |
/* |
* Copyright 2014 Advanced Micro Devices, Inc. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
/* |
* radeon_kfd.h defines the private interface between the |
* AMD kernel graphics drivers and the AMD KFD. |
*/ |
#ifndef RADEON_KFD_H_INCLUDED |
#define RADEON_KFD_H_INCLUDED |
#include <linux/types.h> |
//#include "../amd/include/kgd_kfd_interface.h" |
struct radeon_device; |
bool radeon_kfd_init(void); |
void radeon_kfd_fini(void); |
void radeon_kfd_suspend(struct radeon_device *rdev); |
int radeon_kfd_resume(struct radeon_device *rdev); |
void radeon_kfd_interrupt(struct radeon_device *rdev, |
const void *ih_ring_entry); |
void radeon_kfd_device_probe(struct radeon_device *rdev); |
void radeon_kfd_device_init(struct radeon_device *rdev); |
void radeon_kfd_device_fini(struct radeon_device *rdev); |
#endif /* RADEON_KFD_H_INCLUDED */ |
/drivers/video/drm/radeon/radeon_mode.h |
---|
321,6 → 321,10 |
uint32_t crtc_offset; |
struct drm_gem_object *cursor_bo; |
uint64_t cursor_addr; |
int cursor_x; |
int cursor_y; |
int cursor_hot_x; |
int cursor_hot_y; |
int cursor_width; |
int cursor_height; |
int max_cursor_width; |
462,6 → 466,7 |
u8 id; |
u32 reg; |
u32 mask; |
u32 shift; |
}; |
struct radeon_hpd { |
748,6 → 753,8 |
extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, |
struct radeon_atom_ss *ss, |
int id, u32 clock); |
extern struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev, |
u8 id); |
extern void radeon_compute_pll_legacy(struct radeon_pll *pll, |
uint64_t freq, |
777,6 → 784,7 |
extern int atombios_get_encoder_mode(struct drm_encoder *encoder); |
extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action); |
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); |
extern bool radeon_encoder_is_digital(struct drm_encoder *encoder); |
extern void radeon_crtc_load_lut(struct drm_crtc *crtc); |
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
801,13 → 809,16 |
extern int radeon_crtc_do_set_base(struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
int x, int y, int atomic); |
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc, |
struct drm_file *file_priv, |
uint32_t handle, |
uint32_t width, |
uint32_t height); |
uint32_t height, |
int32_t hot_x, |
int32_t hot_y); |
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, |
int x, int y); |
extern void radeon_cursor_reset(struct drm_crtc *crtc); |
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, |
unsigned int flags, |
/drivers/video/drm/radeon/radeon_object.c |
---|
96,40 → 96,83 |
{ |
u32 c = 0, i; |
rbo->placement.fpfn = 0; |
rbo->placement.lpfn = 0; |
rbo->placement.placement = rbo->placements; |
rbo->placement.busy_placement = rbo->placements; |
if (domain & RADEON_GEM_DOMAIN_VRAM) |
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | |
if (domain & RADEON_GEM_DOMAIN_VRAM) { |
/* Try placing BOs which don't need CPU access outside of the |
* CPU accessible part of VRAM |
*/ |
if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) && |
rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) { |
rbo->placements[c].fpfn = |
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
rbo->placements[c++].flags = TTM_PL_FLAG_WC | |
TTM_PL_FLAG_UNCACHED | |
TTM_PL_FLAG_VRAM; |
} |
rbo->placements[c].fpfn = 0; |
rbo->placements[c++].flags = TTM_PL_FLAG_WC | |
TTM_PL_FLAG_UNCACHED | |
TTM_PL_FLAG_VRAM; |
} |
if (domain & RADEON_GEM_DOMAIN_GTT) { |
if (rbo->flags & RADEON_GEM_GTT_UC) { |
rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; |
rbo->placements[c].fpfn = 0; |
rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | |
TTM_PL_FLAG_TT; |
} else if ((rbo->flags & RADEON_GEM_GTT_WC) || |
(rbo->rdev->flags & RADEON_IS_AGP)) { |
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | |
rbo->placements[c].fpfn = 0; |
rbo->placements[c++].flags = TTM_PL_FLAG_WC | |
TTM_PL_FLAG_UNCACHED | |
TTM_PL_FLAG_TT; |
} else { |
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; |
rbo->placements[c].fpfn = 0; |
rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | |
TTM_PL_FLAG_TT; |
} |
} |
if (domain & RADEON_GEM_DOMAIN_CPU) { |
if (rbo->flags & RADEON_GEM_GTT_UC) { |
rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; |
rbo->placements[c].fpfn = 0; |
rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | |
TTM_PL_FLAG_SYSTEM; |
} else if ((rbo->flags & RADEON_GEM_GTT_WC) || |
rbo->rdev->flags & RADEON_IS_AGP) { |
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | |
rbo->placements[c].fpfn = 0; |
rbo->placements[c++].flags = TTM_PL_FLAG_WC | |
TTM_PL_FLAG_UNCACHED | |
TTM_PL_FLAG_SYSTEM; |
} else { |
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; |
rbo->placements[c].fpfn = 0; |
rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | |
TTM_PL_FLAG_SYSTEM; |
} |
} |
if (!c) |
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
if (!c) { |
rbo->placements[c].fpfn = 0; |
rbo->placements[c++].flags = TTM_PL_MASK_CACHING | |
TTM_PL_FLAG_SYSTEM; |
} |
rbo->placement.num_placement = c; |
rbo->placement.num_busy_placement = c; |
for (i = 0; i < c; ++i) { |
if ((rbo->flags & RADEON_GEM_CPU_ACCESS) && |
(rbo->placements[i].flags & TTM_PL_FLAG_VRAM) && |
!rbo->placements[i].fpfn) |
rbo->placements[i].lpfn = |
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
else |
rbo->placements[i].lpfn = 0; |
} |
/* |
* Use two-ended allocation depending on the buffer size to |
* improve fragmentation quality. |
137,14 → 180,16 |
*/ |
if (rbo->tbo.mem.size > 512 * 1024) { |
for (i = 0; i < c; i++) { |
rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN; |
rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; |
} |
} |
} |
int radeon_bo_create(struct radeon_device *rdev, |
unsigned long size, int byte_align, bool kernel, u32 domain, |
u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr) |
unsigned long size, int byte_align, bool kernel, |
u32 domain, u32 flags, struct sg_table *sg, |
struct reservation_object *resv, |
struct radeon_bo **bo_ptr) |
{ |
struct radeon_bo *bo; |
enum ttm_bo_type type; |
187,11 → 232,12 |
if (!(rdev->flags & RADEON_IS_PCIE)) |
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
// printf("%s rdev->flags %x bo->flags %x\n", |
// __FUNCTION__, bo->flags); |
if (flags & RADEON_GEM_GTT_WC) |
#ifdef CONFIG_X86_32 |
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit |
* See https://bugs.freedesktop.org/show_bug.cgi?id=84627 |
*/ |
bo->flags &= ~RADEON_GEM_GTT_WC; |
#endif |
radeon_ttm_placement_from_domain(bo, domain); |
/* Kernel allocation are uninterruptible */ |
198,7 → 244,7 |
// down_read(&rdev->pm.mclk_lock); |
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, |
&bo->placement, page_align, !kernel, NULL, |
acc_size, sg, &radeon_ttm_bo_destroy); |
acc_size, sg, resv, &radeon_ttm_bo_destroy); |
// up_read(&rdev->pm.mclk_lock); |
if (unlikely(r != 0)) { |
return r; |
289,21 → 335,19 |
return 0; |
} |
radeon_ttm_placement_from_domain(bo, domain); |
if (domain == RADEON_GEM_DOMAIN_VRAM) { |
for (i = 0; i < bo->placement.num_placement; i++) { |
/* force to pin into visible video ram */ |
bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && |
!(bo->flags & RADEON_GEM_NO_CPU_ACCESS) && |
(!max_offset || max_offset > bo->rdev->mc.visible_vram_size)) |
bo->placements[i].lpfn = |
bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
else |
bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; |
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; |
} |
if (max_offset) { |
u64 lpfn = max_offset >> PAGE_SHIFT; |
if (!bo->placement.lpfn) |
bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; |
if (lpfn < bo->placement.lpfn) |
bo->placement.lpfn = lpfn; |
} |
for (i = 0; i < bo->placement.num_placement; i++) |
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; |
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
if (likely(r == 0)) { |
bo->pin_count = 1; |
335,8 → 379,10 |
bo->pin_count--; |
if (bo->pin_count) |
return 0; |
for (i = 0; i < bo->placement.num_placement; i++) |
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; |
for (i = 0; i < bo->placement.num_placement; i++) { |
bo->placements[i].lpfn = 0; |
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; |
} |
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
if (likely(r == 0)) { |
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) |
422,24 → 468,29 |
struct ww_acquire_ctx *ticket, |
struct list_head *head, int ring) |
{ |
struct radeon_cs_reloc *lobj; |
struct radeon_bo *bo; |
struct radeon_bo_list *lobj; |
struct list_head duplicates; |
int r; |
u64 bytes_moved = 0, initial_bytes_moved; |
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); |
r = ttm_eu_reserve_buffers(ticket, head); |
INIT_LIST_HEAD(&duplicates); |
r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates); |
if (unlikely(r != 0)) { |
return r; |
} |
list_for_each_entry(lobj, head, tv.head) { |
bo = lobj->robj; |
struct radeon_bo *bo = lobj->robj; |
if (!bo->pin_count) { |
u32 domain = lobj->prefered_domains; |
u32 allowed = lobj->allowed_domains; |
u32 current_domain = |
radeon_mem_type_to_domain(bo->tbo.mem.mem_type); |
WARN_ONCE(bo->gem_base.dumb, |
"GPU use of dumb buffer is illegal.\n"); |
/* Check if this buffer will be moved and don't move it |
* if we have moved too many buffers for this IB already. |
* |
448,7 → 499,7 |
* into account. We don't want to disallow buffer moves |
* completely. |
*/ |
if ((lobj->allowed_domains & current_domain) != 0 && |
if ((allowed & current_domain) != 0 && |
(domain & current_domain) == 0 && /* will be moved */ |
bytes_moved > bytes_moved_threshold) { |
/* don't move it */ |
458,7 → 509,7 |
retry: |
radeon_ttm_placement_from_domain(bo, domain); |
if (ring == R600_RING_TYPE_UVD_INDEX) |
radeon_uvd_force_into_uvd_segment(bo); |
radeon_uvd_force_into_uvd_segment(bo, allowed); |
initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved); |
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
478,6 → 529,12 |
lobj->gpu_offset = radeon_bo_gpu_offset(bo); |
lobj->tiling_flags = bo->tiling_flags; |
} |
list_for_each_entry(lobj, &duplicates, tv.head) { |
lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj); |
lobj->tiling_flags = lobj->robj->tiling_flags; |
} |
return 0; |
} |
678,12 → 735,29 |
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); |
if (unlikely(r != 0)) |
return r; |
spin_lock(&bo->tbo.bdev->fence_lock); |
if (mem_type) |
*mem_type = bo->tbo.mem.mem_type; |
if (bo->tbo.sync_obj) |
r = ttm_bo_wait(&bo->tbo, true, true, no_wait); |
spin_unlock(&bo->tbo.bdev->fence_lock); |
ttm_bo_unreserve(&bo->tbo); |
return r; |
} |
/** |
* radeon_bo_fence - add fence to buffer object |
* |
* @bo: buffer object in question |
* @fence: fence to add |
* @shared: true if fence should be added shared |
* |
*/ |
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, |
bool shared) |
{ |
struct reservation_object *resv = bo->tbo.resv; |
if (shared) |
reservation_object_add_shared_fence(resv, &fence->base); |
else |
reservation_object_add_excl_fence(resv, &fence->base); |
} |
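A minimal usage sketch, assuming the fence-returning radeon_copy() variant |
this series converts callers to (compare the radeon_copy_dma/blit hunks in |
radeon_test.c below); bo and the address variables are placeholders: |
	fence = radeon_copy(rdev, src_addr, dst_addr, num_gpu_pages, |
			    bo->tbo.resv); |
	if (!IS_ERR(fence)) { |
		/* the copy writes bo, so add the fence as exclusive */ |
		radeon_bo_fence(bo, fence, false); |
		radeon_fence_unref(&fence); |
	} |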
/drivers/video/drm/radeon/radeon_object.h |
---|
126,6 → 126,7 |
unsigned long size, int byte_align, |
bool kernel, u32 domain, u32 flags, |
struct sg_table *sg, |
struct reservation_object *resv, |
struct radeon_bo **bo_ptr); |
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); |
extern void radeon_bo_kunmap(struct radeon_bo *bo); |
154,6 → 155,8 |
struct ttm_mem_reg *new_mem); |
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); |
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); |
extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, |
bool shared); |
/* |
* sub allocation |
/drivers/video/drm/radeon/radeon_pm.c |
---|
1479,7 → 1479,7 |
if (rdev->pm.active_crtcs & (1 << crtc)) { |
vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL); |
if ((vbl_status & DRM_SCANOUTPOS_VALID) && |
!(vbl_status & DRM_SCANOUTPOS_INVBL)) |
!(vbl_status & DRM_SCANOUTPOS_IN_VBLANK)) |
in_vbl = false; |
} |
} |
/drivers/video/drm/radeon/radeon_ring.c |
---|
45,27 → 45,6 |
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring); |
/** |
* radeon_ring_write - write a value to the ring |
* |
* @ring: radeon_ring structure holding ring information |
* @v: dword (dw) value to write |
* |
* Write a value to the requested ring buffer (all asics). |
*/ |
void radeon_ring_write(struct radeon_ring *ring, uint32_t v) |
{ |
#if DRM_DEBUG_CODE |
if (ring->count_dw <= 0) { |
DRM_ERROR("radeon: writing more dwords to the ring than expected!\n"); |
} |
#endif |
ring->ring[ring->wptr++] = v; |
ring->wptr &= ring->ptr_mask; |
ring->count_dw--; |
ring->ring_free_dw--; |
} |
/** |
* radeon_ring_supports_scratch_reg - check if the ring supports |
* writing to scratch registers |
* |
404,7 → 383,7 |
/* Allocate ring buffer */ |
if (ring->ring_obj == NULL) { |
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_GTT, 0, |
RADEON_GEM_DOMAIN_GTT, 0, NULL, |
NULL, &ring->ring_obj); |
if (r) { |
dev_err(rdev->dev, "(%d) ring create failed\n", r); |
/drivers/video/drm/radeon/radeon_sa.c |
---|
65,7 → 65,7 |
} |
r = radeon_bo_create(rdev, size, align, true, |
domain, flags, NULL, &sa_manager->bo); |
domain, flags, NULL, NULL, &sa_manager->bo); |
if (r) { |
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); |
return r; |
/drivers/video/drm/radeon/radeon_semaphore.c |
---|
34,15 → 34,14 |
int radeon_semaphore_create(struct radeon_device *rdev, |
struct radeon_semaphore **semaphore) |
{ |
uint64_t *cpu_addr; |
int i, r; |
int r; |
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); |
if (*semaphore == NULL) { |
return -ENOMEM; |
} |
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, |
8 * RADEON_NUM_SYNCS, 8); |
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, |
&(*semaphore)->sa_bo, 8, 8); |
if (r) { |
kfree(*semaphore); |
*semaphore = NULL; |
51,13 → 50,8 |
(*semaphore)->waiters = 0; |
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); |
cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo); |
for (i = 0; i < RADEON_NUM_SYNCS; ++i) |
cpu_addr[i] = 0; |
*((uint64_t *)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) |
(*semaphore)->sync_to[i] = NULL; |
return 0; |
} |
95,99 → 89,6 |
return false; |
} |
/** |
* radeon_semaphore_sync_to - use the semaphore to sync to a fence |
* |
* @semaphore: semaphore object to add fence to |
* @fence: fence to sync to |
* |
* Sync to the fence using this semaphore object |
*/ |
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, |
struct radeon_fence *fence) |
{ |
struct radeon_fence *other; |
if (!fence) |
return; |
other = semaphore->sync_to[fence->ring]; |
semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other); |
} |
/** |
* radeon_semaphore_sync_rings - sync ring to all registered fences |
* |
* @rdev: radeon_device pointer |
* @semaphore: semaphore object to use for sync |
* @ring: ring that needs sync |
* |
* Ensure that all registered fences are signaled before letting |
* the ring continue. The caller must hold the ring lock. |
*/ |
int radeon_semaphore_sync_rings(struct radeon_device *rdev, |
struct radeon_semaphore *semaphore, |
int ring) |
{ |
unsigned count = 0; |
int i, r; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
struct radeon_fence *fence = semaphore->sync_to[i]; |
/* check if we really need to sync */ |
if (!radeon_fence_need_sync(fence, ring)) |
continue; |
/* prevent GPU deadlocks */ |
if (!rdev->ring[i].ready) { |
dev_err(rdev->dev, "Syncing to a disabled ring!"); |
return -EINVAL; |
} |
if (++count > RADEON_NUM_SYNCS) { |
/* not enough room, wait manually */ |
r = radeon_fence_wait(fence, false); |
if (r) |
return r; |
continue; |
} |
/* allocate enough space for sync command */ |
r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); |
if (r) { |
return r; |
} |
/* emit the signal semaphore */ |
if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { |
/* signaling wasn't successful wait manually */ |
radeon_ring_undo(&rdev->ring[i]); |
r = radeon_fence_wait(fence, false); |
if (r) |
return r; |
continue; |
} |
/* we assume caller has already allocated space on waiters ring */ |
if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { |
/* waiting wasn't successful wait manually */ |
radeon_ring_undo(&rdev->ring[i]); |
r = radeon_fence_wait(fence, false); |
if (r) |
return r; |
continue; |
} |
radeon_ring_commit(rdev, &rdev->ring[i], false); |
radeon_fence_note_sync(fence, ring); |
semaphore->gpu_addr += 8; |
} |
return 0; |
} |
void radeon_semaphore_free(struct radeon_device *rdev, |
struct radeon_semaphore **semaphore, |
struct radeon_fence *fence) |
/drivers/video/drm/radeon/radeon_sync.c |
---|
0,0 → 1,220 |
/* |
* Copyright 2014 Advanced Micro Devices, Inc. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
*/ |
/* |
* Authors: |
* Christian König <christian.koenig@amd.com> |
*/ |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_trace.h" |
/** |
* radeon_sync_create - zero init sync object |
* |
* @sync: sync object to initialize |
* |
* Just clear the sync object for now. |
*/ |
void radeon_sync_create(struct radeon_sync *sync) |
{ |
unsigned i; |
for (i = 0; i < RADEON_NUM_SYNCS; ++i) |
sync->semaphores[i] = NULL; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) |
sync->sync_to[i] = NULL; |
sync->last_vm_update = NULL; |
} |
/** |
* radeon_sync_fence - use the semaphore to sync to a fence |
* |
* @sync: sync object to add fence to |
* @fence: fence to sync to |
* |
* Sync to the fence using the semaphore objects |
*/ |
void radeon_sync_fence(struct radeon_sync *sync, |
struct radeon_fence *fence) |
{ |
struct radeon_fence *other; |
if (!fence) |
return; |
other = sync->sync_to[fence->ring]; |
sync->sync_to[fence->ring] = radeon_fence_later(fence, other); |
if (fence->is_vm_update) { |
other = sync->last_vm_update; |
sync->last_vm_update = radeon_fence_later(fence, other); |
} |
} |
/** |
* radeon_sync_resv - use the semaphores to sync to a reservation object |
* |
* @sync: sync object to add fences from reservation object to |
* @resv: reservation object with embedded fence |
* @shared: true if we should only sync to the exclusive fence |
* |
* Sync to the fence using the semaphore objects |
*/ |
int radeon_sync_resv(struct radeon_device *rdev, |
struct radeon_sync *sync, |
struct reservation_object *resv, |
bool shared) |
{ |
struct reservation_object_list *flist; |
struct fence *f; |
struct radeon_fence *fence; |
unsigned i; |
int r = 0; |
/* always sync to the exclusive fence */ |
f = reservation_object_get_excl(resv); |
fence = f ? to_radeon_fence(f) : NULL; |
if (fence && fence->rdev == rdev) |
radeon_sync_fence(sync, fence); |
else if (f) |
r = fence_wait(f, true); |
flist = reservation_object_get_list(resv); |
if (shared || !flist || r) |
return r; |
for (i = 0; i < flist->shared_count; ++i) { |
f = rcu_dereference_protected(flist->shared[i], |
reservation_object_held(resv)); |
fence = to_radeon_fence(f); |
if (fence && fence->rdev == rdev) |
radeon_sync_fence(sync, fence); |
else |
r = fence_wait(f, true); |
if (r) |
break; |
} |
return r; |
} |
/** |
* radeon_sync_rings - sync ring to all registered fences |
* |
* @rdev: radeon_device pointer |
* @sync: sync object to use |
* @ring: ring that needs sync |
* |
* Ensure that all registered fences are signaled before letting |
* the ring continue. The caller must hold the ring lock. |
*/ |
int radeon_sync_rings(struct radeon_device *rdev, |
struct radeon_sync *sync, |
int ring) |
{ |
unsigned count = 0; |
int i, r; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
struct radeon_fence *fence = sync->sync_to[i]; |
struct radeon_semaphore *semaphore; |
/* check if we really need to sync */ |
if (!radeon_fence_need_sync(fence, ring)) |
continue; |
/* prevent GPU deadlocks */ |
if (!rdev->ring[i].ready) { |
dev_err(rdev->dev, "Syncing to a disabled ring!"); |
return -EINVAL; |
} |
if (count >= RADEON_NUM_SYNCS) { |
/* not enough room, wait manually */ |
r = radeon_fence_wait(fence, false); |
if (r) |
return r; |
continue; |
} |
r = radeon_semaphore_create(rdev, &semaphore); |
if (r) |
return r; |
sync->semaphores[count++] = semaphore; |
/* allocate enough space for sync command */ |
r = radeon_ring_alloc(rdev, &rdev->ring[i], 16); |
if (r) |
return r; |
/* emit the signal semaphore */ |
if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) { |
/* signaling wasn't successful wait manually */ |
radeon_ring_undo(&rdev->ring[i]); |
r = radeon_fence_wait(fence, false); |
if (r) |
return r; |
continue; |
} |
/* we assume caller has already allocated space on waiters ring */ |
if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) { |
/* waiting wasn't successful wait manually */ |
radeon_ring_undo(&rdev->ring[i]); |
r = radeon_fence_wait(fence, false); |
if (r) |
return r; |
continue; |
} |
radeon_ring_commit(rdev, &rdev->ring[i], false); |
radeon_fence_note_sync(fence, ring); |
} |
return 0; |
} |
/** |
* radeon_sync_free - free the sync object |
* |
* @rdev: radeon_device pointer |
* @sync: sync object to use |
* @fence: fence to use for the free |
* |
* Free the sync object by freeing all semaphores in it. |
*/ |
void radeon_sync_free(struct radeon_device *rdev, |
struct radeon_sync *sync, |
struct radeon_fence *fence) |
{ |
unsigned i; |
for (i = 0; i < RADEON_NUM_SYNCS; ++i) |
radeon_semaphore_free(rdev, &sync->semaphores[i], fence); |
} |
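Putting the pieces together, the typical lifecycle mirrors the radeon_ib.c |
hunks above; a condensed sketch (ring locking and most error handling |
omitted, dep_fence/job_fence/bo/ring are placeholders): |
	struct radeon_sync sync; |
	radeon_sync_create(&sync); |
	radeon_sync_fence(&sync, dep_fence); /* per-ring dependency */ |
	r = radeon_sync_resv(rdev, &sync, bo->tbo.resv, false); |
	if (!r) |
		r = radeon_sync_rings(rdev, &sync, ring); |
	/* ... emit the actual work and its fence on 'ring' ... */ |
	radeon_sync_free(rdev, &sync, job_fence); |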
/drivers/video/drm/radeon/radeon_test.c |
---|
67,7 → 67,7 |
} |
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
0, NULL, &vram_obj); |
0, NULL, NULL, &vram_obj); |
if (r) { |
DRM_ERROR("Failed to create VRAM object\n"); |
goto out_cleanup; |
87,7 → 87,8 |
struct radeon_fence *fence = NULL; |
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); |
RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, |
gtt_obj + i); |
if (r) { |
DRM_ERROR("Failed to create GTT object %d\n", i); |
goto out_lclean; |
116,11 → 117,16 |
radeon_bo_kunmap(gtt_obj[i]); |
if (ring == R600_RING_TYPE_DMA_INDEX) |
r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); |
fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
else |
r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); |
if (r) { |
fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
if (IS_ERR(fence)) { |
DRM_ERROR("Failed GTT->VRAM copy %d\n", i); |
r = PTR_ERR(fence); |
goto out_lclean_unpin; |
} |
162,11 → 168,16 |
radeon_bo_kunmap(vram_obj); |
if (ring == R600_RING_TYPE_DMA_INDEX) |
r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); |
fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
else |
r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); |
if (r) { |
fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
if (IS_ERR(fence)) { |
DRM_ERROR("Failed VRAM->GTT copy %d\n", i); |
r = PTR_ERR(fence); |
goto out_lclean_unpin; |
} |
222,7 → 233,7 |
radeon_bo_unreserve(gtt_obj[i]); |
radeon_bo_unref(&gtt_obj[i]); |
} |
if (fence) |
if (fence && !IS_ERR(fence)) |
radeon_fence_unref(&fence); |
break; |
} |
/drivers/video/drm/radeon/radeon_ttm.c |
---|
166,12 → 166,15 |
static void radeon_evict_flags(struct ttm_buffer_object *bo, |
struct ttm_placement *placement) |
{ |
static struct ttm_place placements = { |
.fpfn = 0, |
.lpfn = 0, |
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM |
}; |
struct radeon_bo *rbo; |
static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
if (!radeon_ttm_bo_is_radeon_bo(bo)) { |
placement->fpfn = 0; |
placement->lpfn = 0; |
placement->placement = &placements; |
placement->busy_placement = &placements; |
placement->num_placement = 1; |
181,9 → 184,32 |
rbo = container_of(bo, struct radeon_bo, tbo); |
switch (bo->mem.mem_type) { |
case TTM_PL_VRAM: |
if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) |
if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false) |
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
else |
else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size && |
bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) { |
unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
int i; |
/* Try evicting to the CPU inaccessible part of VRAM |
* first, but only set GTT as busy placement, so this |
* BO will be evicted to GTT rather than causing other |
* BOs to be evicted from VRAM |
*/ |
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM | |
RADEON_GEM_DOMAIN_GTT); |
rbo->placement.num_busy_placement = 0; |
for (i = 0; i < rbo->placement.num_placement; i++) { |
if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { |
if (rbo->placements[0].fpfn < fpfn) |
rbo->placements[0].fpfn = fpfn; |
} else { |
rbo->placement.busy_placement = |
&rbo->placements[i]; |
rbo->placement.num_busy_placement = 1; |
} |
} |
} else |
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); |
break; |
case TTM_PL_TT: |
216,6 → 242,7 |
struct radeon_device *rdev; |
uint64_t old_start, new_start; |
struct radeon_fence *fence; |
unsigned num_pages; |
int r, ridx; |
rdev = radeon_get_rdev(bo->bdev); |
252,13 → 279,12 |
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); |
/* sync other rings */ |
fence = bo->sync_obj; |
r = radeon_copy(rdev, old_start, new_start, |
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ |
&fence); |
/* FIXME: handle copy error */ |
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, |
num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv); |
if (IS_ERR(fence)) |
return PTR_ERR(fence); |
r = ttm_bo_move_accel_cleanup(bo, &fence->base, |
evict, no_wait_gpu, new_mem); |
radeon_fence_unref(&fence); |
return r; |
272,7 → 298,7 |
struct radeon_device *rdev; |
struct ttm_mem_reg *old_mem = &bo->mem; |
struct ttm_mem_reg tmp_mem; |
u32 placements; |
struct ttm_place placements; |
struct ttm_placement placement; |
int r; |
279,13 → 305,13 |
rdev = radeon_get_rdev(bo->bdev); |
tmp_mem = *new_mem; |
tmp_mem.mm_node = NULL; |
placement.fpfn = 0; |
placement.lpfn = 0; |
placement.num_placement = 1; |
placement.placement = &placements; |
placement.num_busy_placement = 1; |
placement.busy_placement = &placements; |
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
placements.fpfn = 0; |
placements.lpfn = 0; |
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, |
interruptible, no_wait_gpu); |
if (unlikely(r)) { |
320,19 → 346,19 |
struct ttm_mem_reg *old_mem = &bo->mem; |
struct ttm_mem_reg tmp_mem; |
struct ttm_placement placement; |
u32 placements; |
struct ttm_place placements; |
int r; |
rdev = radeon_get_rdev(bo->bdev); |
tmp_mem = *new_mem; |
tmp_mem.mm_node = NULL; |
placement.fpfn = 0; |
placement.lpfn = 0; |
placement.num_placement = 1; |
placement.placement = &placements; |
placement.num_busy_placement = 1; |
placement.busy_placement = &placements; |
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
placements.fpfn = 0; |
placements.lpfn = 0; |
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, |
interruptible, no_wait_gpu); |
if (unlikely(r)) { |
471,31 → 497,6 |
{ |
} |
static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) |
{ |
return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); |
} |
static int radeon_sync_obj_flush(void *sync_obj) |
{ |
return 0; |
} |
static void radeon_sync_obj_unref(void **sync_obj) |
{ |
radeon_fence_unref((struct radeon_fence **)sync_obj); |
} |
static void *radeon_sync_obj_ref(void *sync_obj) |
{ |
return radeon_fence_ref((struct radeon_fence *)sync_obj); |
} |
static bool radeon_sync_obj_signaled(void *sync_obj) |
{ |
return radeon_fence_signaled((struct radeon_fence *)sync_obj); |
} |
/* |
* TTM backend functions. |
*/ |
503,6 → 504,10 |
struct ttm_dma_tt ttm; |
struct radeon_device *rdev; |
u64 offset; |
uint64_t userptr; |
struct mm_struct *usermm; |
uint32_t userflags; |
}; |
static int radeon_ttm_backend_bind(struct ttm_tt *ttm, |
580,10 → 585,17 |
return &gtt->ttm.ttm; |
} |
static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm) |
{ |
if (!ttm || ttm->func != &radeon_backend_func) |
return NULL; |
return (struct radeon_ttm_tt *)ttm; |
} |
static int radeon_ttm_tt_populate(struct ttm_tt *ttm) |
{ |
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); |
struct radeon_device *rdev; |
struct radeon_ttm_tt *gtt = (void *)ttm; |
unsigned i; |
int r; |
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
628,7 → 640,7 |
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) |
{ |
struct radeon_device *rdev; |
struct radeon_ttm_tt *gtt = (void *)ttm; |
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); |
unsigned i; |
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
663,11 → 675,6 |
.evict_flags = &radeon_evict_flags, |
.move = &radeon_bo_move, |
.verify_access = &radeon_verify_access, |
.sync_obj_signaled = &radeon_sync_obj_signaled, |
.sync_obj_wait = &radeon_sync_obj_wait, |
.sync_obj_flush = &radeon_sync_obj_flush, |
.sync_obj_unref = &radeon_sync_obj_unref, |
.sync_obj_ref = &radeon_sync_obj_ref, |
.move_notify = &radeon_bo_move_notify, |
// .fault_reserve_notify = &radeon_bo_fault_reserve_notify, |
.io_mem_reserve = &radeon_ttm_io_mem_reserve, |
704,7 → 711,7 |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
r = radeon_bo_create(rdev, 16*1024*1024, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, |
NULL, &rdev->stollen_vga_memory); |
if (r) { |
return r; |
/drivers/video/drm/radeon/radeon_uvd.c |
---|
46,6 → 46,9 |
#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin" |
#define FIRMWARE_BONAIRE "radeon/BONAIRE_uvd.bin" |
MODULE_FIRMWARE(FIRMWARE_R600); |
MODULE_FIRMWARE(FIRMWARE_RS780); |
MODULE_FIRMWARE(FIRMWARE_RV770); |
MODULE_FIRMWARE(FIRMWARE_RV710); |
MODULE_FIRMWARE(FIRMWARE_CYPRESS); |
MODULE_FIRMWARE(FIRMWARE_SUMO); |
115,9 → 118,11 |
} |
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + |
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; |
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE + |
RADEON_GPU_PAGE_SIZE; |
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo); |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, |
NULL, &rdev->uvd.vcpu_bo); |
if (r) { |
dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); |
return r; |
231,12 → 236,32 |
return 0; |
} |
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo) |
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo, |
uint32_t allowed_domains) |
{ |
rbo->placement.fpfn = 0 >> PAGE_SHIFT; |
rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; |
int i; |
for (i = 0; i < rbo->placement.num_placement; ++i) { |
rbo->placements[i].fpfn = 0 >> PAGE_SHIFT; |
rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; |
} |
/* If it must be in VRAM it must be in the first segment as well */ |
if (allowed_domains == RADEON_GEM_DOMAIN_VRAM) |
return; |
/* abort if we already have more than one placement */ |
if (rbo->placement.num_placement > 1) |
return; |
/* add another 256MB segment */ |
rbo->placements[1] = rbo->placements[0]; |
rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; |
rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT; |
rbo->placement.num_placement++; |
rbo->placement.num_busy_placement++; |
} |
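A quick arithmetic check of the segment bounds set above, assuming 4 KiB pages (PAGE_SHIFT == 12): |
/* Editor's note, not part of the diff: |
 *   (256 * 1024 * 1024) >> 12 == 65536 pages, |
 * so the first segment covers pfns [0, 65536) and the optional |
 * second segment pfns [65536, 131072), i.e. the second 256MB.  */ |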
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) |
{ |
int i, r; |
356,6 → 381,7 |
{ |
int32_t *msg, msg_type, handle; |
unsigned img_size = 0; |
struct fence *f; |
void *ptr; |
int i, r; |
365,8 → 391,9 |
return -EINVAL; |
} |
if (bo->tbo.sync_obj) { |
r = radeon_fence_wait(bo->tbo.sync_obj, false); |
f = reservation_object_get_excl(bo->tbo.resv); |
if (f) { |
r = radeon_fence_wait((struct radeon_fence *)f, false); |
if (r) { |
DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); |
return r; |
441,12 → 468,12 |
unsigned buf_sizes[], bool *has_msg_cmd) |
{ |
struct radeon_cs_chunk *relocs_chunk; |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
unsigned idx, cmd, offset; |
uint64_t start, end; |
int r; |
relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
relocs_chunk = p->chunk_relocs; |
offset = radeon_get_ib_value(p, data0); |
idx = radeon_get_ib_value(p, data1); |
if (idx >= relocs_chunk->length_dw) { |
455,7 → 482,7 |
return -EINVAL; |
} |
reloc = p->relocs_ptr[(idx / 4)]; |
reloc = &p->relocs[(idx / 4)]; |
start = reloc->gpu_offset; |
end = start + radeon_bo_size(reloc->robj); |
start += offset; |
563,13 → 590,13 |
[0x00000003] = 2048, |
}; |
if (p->chunks[p->chunk_ib_idx].length_dw % 16) { |
if (p->chunk_ib->length_dw % 16) { |
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", |
p->chunks[p->chunk_ib_idx].length_dw); |
p->chunk_ib->length_dw); |
return -EINVAL; |
} |
if (p->chunk_relocs_idx == -1) { |
if (p->chunk_relocs == NULL) { |
DRM_ERROR("No relocation chunk !\n"); |
return -EINVAL; |
} |
593,7 → 620,7 |
DRM_ERROR("Unknown packet type %d !\n", pkt.type); |
return -EINVAL; |
} |
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
} while (p->idx < p->chunk_ib->length_dw); |
if (!has_msg_cmd) { |
DRM_ERROR("UVD-IBs need a msg command!\n"); |
604,38 → 631,16 |
} |
static int radeon_uvd_send_msg(struct radeon_device *rdev, |
int ring, struct radeon_bo *bo, |
int ring, uint64_t addr, |
struct radeon_fence **fence) |
{ |
struct ttm_validate_buffer tv; |
struct ww_acquire_ctx ticket; |
struct list_head head; |
struct radeon_ib ib; |
uint64_t addr; |
int i, r; |
memset(&tv, 0, sizeof(tv)); |
tv.bo = &bo->tbo; |
INIT_LIST_HEAD(&head); |
list_add(&tv.head, &head); |
r = ttm_eu_reserve_buffers(&ticket, &head); |
r = radeon_ib_get(rdev, ring, &ib, NULL, 64); |
if (r) |
return r; |
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM); |
radeon_uvd_force_into_uvd_segment(bo); |
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
if (r) |
goto err; |
r = radeon_ib_get(rdev, ring, &ib, NULL, 64); |
if (r) |
goto err; |
addr = radeon_bo_gpu_offset(bo); |
ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0); |
ib.ptr[1] = addr; |
ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0); |
647,19 → 652,11 |
ib.length_dw = 16; |
r = radeon_ib_schedule(rdev, &ib, NULL, false); |
if (r) |
goto err; |
ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); |
if (fence) |
*fence = radeon_fence_ref(ib.fence); |
radeon_ib_free(rdev, &ib); |
radeon_bo_unref(&bo); |
return 0; |
err: |
ttm_eu_backoff_reservation(&ticket, &head); |
return r; |
} |
669,28 → 666,19 |
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, |
uint32_t handle, struct radeon_fence **fence) |
{ |
struct radeon_bo *bo; |
uint32_t *msg; |
/* we use the last page of the vcpu bo for the UVD message */ |
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - |
RADEON_GPU_PAGE_SIZE; |
uint32_t *msg = rdev->uvd.cpu_addr + offs; |
uint64_t addr = rdev->uvd.gpu_addr + offs; |
int r, i; |
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo); |
r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); |
if (r) |
return r; |
r = radeon_bo_reserve(bo, false); |
if (r) { |
radeon_bo_unref(&bo); |
return r; |
} |
r = radeon_bo_kmap(bo, (void **)&msg); |
if (r) { |
radeon_bo_unreserve(bo); |
radeon_bo_unref(&bo); |
return r; |
} |
/* stitch together an UVD create msg */ |
msg[0] = cpu_to_le32(0x00000de4); |
msg[1] = cpu_to_le32(0x00000000); |
706,37 → 694,27 |
for (i = 11; i < 1024; ++i) |
msg[i] = cpu_to_le32(0x0); |
radeon_bo_kunmap(bo); |
radeon_bo_unreserve(bo); |
return radeon_uvd_send_msg(rdev, ring, bo, fence); |
r = radeon_uvd_send_msg(rdev, ring, addr, fence); |
radeon_bo_unreserve(rdev->uvd.vcpu_bo); |
return r; |
} |
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, |
uint32_t handle, struct radeon_fence **fence) |
{ |
struct radeon_bo *bo; |
uint32_t *msg; |
/* we use the last page of the vcpu bo for the UVD message */ |
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - |
RADEON_GPU_PAGE_SIZE; |
uint32_t *msg = rdev->uvd.cpu_addr + offs; |
uint64_t addr = rdev->uvd.gpu_addr + offs; |
int r, i; |
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo); |
r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true); |
if (r) |
return r; |
r = radeon_bo_reserve(bo, false); |
if (r) { |
radeon_bo_unref(&bo); |
return r; |
} |
r = radeon_bo_kmap(bo, (void **)&msg); |
if (r) { |
radeon_bo_unreserve(bo); |
radeon_bo_unref(&bo); |
return r; |
} |
/* stitch together an UVD destroy msg */ |
msg[0] = cpu_to_le32(0x00000de4); |
msg[1] = cpu_to_le32(0x00000002); |
745,10 → 723,9 |
for (i = 4; i < 1024; ++i) |
msg[i] = cpu_to_le32(0x0); |
radeon_bo_kunmap(bo); |
radeon_bo_unreserve(bo); |
return radeon_uvd_send_msg(rdev, ring, bo, fence); |
r = radeon_uvd_send_msg(rdev, ring, addr, fence); |
radeon_bo_unreserve(rdev->uvd.vcpu_bo); |
return r; |
} |
/** |
/drivers/video/drm/radeon/radeon_vce.c |
---|
126,7 → 126,8 |
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) + |
RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE; |
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo); |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, |
&rdev->vce.vcpu_bo); |
if (r) { |
dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); |
return r; |
452,11 → 453,11 |
unsigned size) |
{ |
struct radeon_cs_chunk *relocs_chunk; |
struct radeon_cs_reloc *reloc; |
struct radeon_bo_list *reloc; |
uint64_t start, end, offset; |
unsigned idx; |
relocs_chunk = &p->chunks[p->chunk_relocs_idx]; |
relocs_chunk = p->chunk_relocs; |
offset = radeon_get_ib_value(p, lo); |
idx = radeon_get_ib_value(p, hi); |
466,7 → 467,7 |
return -EINVAL; |
} |
reloc = p->relocs_ptr[(idx / 4)]; |
reloc = &p->relocs[(idx / 4)]; |
start = reloc->gpu_offset; |
end = start + radeon_bo_size(reloc->robj); |
start += offset; |
533,7 → 534,7 |
uint32_t *size = &tmp; |
int i, r; |
while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { |
while (p->idx < p->chunk_ib->length_dw) { |
uint32_t len = radeon_get_ib_value(p, p->idx); |
uint32_t cmd = radeon_get_ib_value(p, p->idx + 1); |
/drivers/video/drm/radeon/radeon_vm.c |
---|
125,26 → 125,25 |
* Add the page directory to the list of BOs to |
* validate for command submission (cayman+). |
*/ |
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, |
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, |
struct radeon_vm *vm, |
struct list_head *head) |
{ |
struct radeon_cs_reloc *list; |
struct radeon_bo_list *list; |
unsigned i, idx; |
list = kmalloc_array(vm->max_pde_used + 2, |
sizeof(struct radeon_cs_reloc), GFP_KERNEL); |
sizeof(struct radeon_bo_list), GFP_KERNEL); |
if (!list) |
return NULL; |
/* add the vm page table to the list */ |
list[0].gobj = NULL; |
list[0].robj = vm->page_directory; |
list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; |
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; |
list[0].tv.bo = &vm->page_directory->tbo; |
list[0].tv.shared = true; |
list[0].tiling_flags = 0; |
list[0].handle = 0; |
list_add(&list[0].tv.head, head); |
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { |
151,13 → 150,12 |
if (!vm->page_tables[i].bo) |
continue; |
list[idx].gobj = NULL; |
list[idx].robj = vm->page_tables[i].bo; |
list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; |
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; |
list[idx].tv.bo = &list[idx].robj->tbo; |
list[idx].tv.shared = true; |
list[idx].tiling_flags = 0; |
list[idx].handle = 0; |
list_add(&list[idx++].tv.head, head); |
} |
180,15 → 178,18 |
struct radeon_vm *vm, int ring) |
{ |
struct radeon_fence *best[RADEON_NUM_RINGS] = {}; |
struct radeon_vm_id *vm_id = &vm->ids[ring]; |
unsigned choices[2] = {}; |
unsigned i; |
/* check if the id is still valid */ |
if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id]) |
if (vm_id->id && vm_id->last_id_use && |
vm_id->last_id_use == rdev->vm_manager.active[vm_id->id]) |
return NULL; |
/* we definitely need to flush */ |
radeon_fence_unref(&vm->last_flush); |
vm_id->pd_gpu_addr = ~0ll; |
/* skip over VMID 0, since it is the system VM */ |
for (i = 1; i < rdev->vm_manager.nvm; ++i) { |
196,8 → 197,8 |
if (fence == NULL) { |
/* found a free one */ |
vm->id = i; |
trace_radeon_vm_grab_id(vm->id, ring); |
vm_id->id = i; |
trace_radeon_vm_grab_id(i, ring); |
return NULL; |
} |
209,8 → 210,8 |
for (i = 0; i < 2; ++i) { |
if (choices[i]) { |
vm->id = choices[i]; |
trace_radeon_vm_grab_id(vm->id, ring); |
vm_id->id = choices[i]; |
trace_radeon_vm_grab_id(choices[i], ring); |
return rdev->vm_manager.active[choices[i]]; |
} |
} |
226,6 → 227,7 |
* @rdev: radeon_device pointer |
* @vm: vm we want to flush |
* @ring: ring to use for flush |
* @updates: last vm update that is waited for |
* |
* Flush the vm (cayman+). |
* |
233,15 → 235,21 |
*/ |
void radeon_vm_flush(struct radeon_device *rdev, |
struct radeon_vm *vm, |
int ring) |
int ring, struct radeon_fence *updates) |
{ |
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); |
struct radeon_vm_id *vm_id = &vm->ids[ring]; |
/* if we can't remember our last VM flush then flush now! */ |
if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) { |
trace_radeon_vm_flush(pd_addr, ring, vm->id); |
vm->pd_gpu_addr = pd_addr; |
radeon_ring_vm_flush(rdev, ring, vm); |
if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates || |
radeon_fence_is_earlier(vm_id->flushed_updates, updates)) { |
trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id); |
radeon_fence_unref(&vm_id->flushed_updates); |
vm_id->flushed_updates = radeon_fence_ref(updates); |
vm_id->pd_gpu_addr = pd_addr; |
radeon_ring_vm_flush(rdev, &rdev->ring[ring], |
vm_id->id, vm_id->pd_gpu_addr); |
} |
} |
261,18 → 269,13 |
struct radeon_vm *vm, |
struct radeon_fence *fence) |
{ |
radeon_fence_unref(&vm->fence); |
vm->fence = radeon_fence_ref(fence); |
unsigned vm_id = vm->ids[fence->ring].id; |
radeon_fence_unref(&rdev->vm_manager.active[vm->id]); |
rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence); |
radeon_fence_unref(&rdev->vm_manager.active[vm_id]); |
rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence); |
radeon_fence_unref(&vm->last_id_use); |
vm->last_id_use = radeon_fence_ref(fence); |
/* we just flushed the VM, remember that */ |
if (!vm->last_flush) |
vm->last_flush = radeon_fence_ref(fence); |
radeon_fence_unref(&vm->ids[fence->ring].last_id_use); |
vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence); |
} |
/** |
385,27 → 388,18 |
static int radeon_vm_clear_bo(struct radeon_device *rdev, |
struct radeon_bo *bo) |
{ |
struct ttm_validate_buffer tv; |
struct ww_acquire_ctx ticket; |
struct list_head head; |
struct radeon_ib ib; |
unsigned entries; |
uint64_t addr; |
int r; |
memset(&tv, 0, sizeof(tv)); |
tv.bo = &bo->tbo; |
INIT_LIST_HEAD(&head); |
list_add(&tv.head, &head); |
r = ttm_eu_reserve_buffers(&ticket, &head); |
r = radeon_bo_reserve(bo, false); |
if (r) |
return r; |
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
if (r) |
goto error; |
goto error_unreserve; |
addr = radeon_bo_gpu_offset(bo); |
entries = radeon_bo_size(bo) / 8; |
412,7 → 406,7 |
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256); |
if (r) |
goto error; |
goto error_unreserve; |
ib.length_dw = 0; |
422,15 → 416,16 |
r = radeon_ib_schedule(rdev, &ib, NULL, false); |
if (r) |
goto error; |
goto error_free; |
ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); |
ib.fence->is_vm_update = true; |
radeon_bo_fence(bo, ib.fence, false); |
error_free: |
radeon_ib_free(rdev, &ib); |
return 0; |
error: |
ttm_eu_backoff_reservation(&ticket, &head); |
error_unreserve: |
radeon_bo_unreserve(bo); |
return r; |
} |
446,7 → 441,7 |
* Validate and set the offset requested within the vm address space. |
* Returns 0 for success, error for failure. |
* |
* Object has to be reserved! |
* Object has to be reserved and gets unreserved by this function! |
*/ |
int radeon_vm_bo_set_addr(struct radeon_device *rdev, |
struct radeon_bo_va *bo_va, |
492,7 → 487,9 |
tmp->vm = vm; |
tmp->addr = bo_va->addr; |
tmp->bo = radeon_bo_ref(bo_va->bo); |
spin_lock(&vm->status_lock); |
list_add(&tmp->vm_status, &vm->freed); |
spin_unlock(&vm->status_lock); |
} |
interval_tree_remove(&bo_va->it, &vm->va); |
545,7 → 542,8 |
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, |
RADEON_GPU_PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt); |
RADEON_GEM_DOMAIN_VRAM, 0, |
NULL, NULL, &pt); |
if (r) |
return r; |
571,7 → 569,7 |
} |
mutex_unlock(&vm->mutex); |
return radeon_bo_reserve(bo_va->bo, false); |
return 0; |
} |
/** |
694,8 → 692,8 |
if (ib.length_dw != 0) { |
radeon_asic_vm_pad_ib(rdev, &ib); |
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); |
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); |
radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true); |
WARN_ON(ib.length_dw > ndw); |
r = radeon_ib_schedule(rdev, &ib, NULL, false); |
if (r) { |
702,9 → 700,8 |
radeon_ib_free(rdev, &ib); |
return r; |
} |
radeon_fence_unref(&vm->fence); |
vm->fence = radeon_fence_ref(ib.fence); |
radeon_fence_unref(&vm->last_flush); |
ib.fence->is_vm_update = true; |
radeon_bo_fence(pd, ib.fence, false); |
} |
radeon_ib_free(rdev, &ib); |
803,7 → 800,7 |
* |
* Global and local mutex must be locked! |
*/ |
static void radeon_vm_update_ptes(struct radeon_device *rdev, |
static int radeon_vm_update_ptes(struct radeon_device *rdev, |
struct radeon_vm *vm, |
struct radeon_ib *ib, |
uint64_t start, uint64_t end, |
820,8 → 817,12 |
struct radeon_bo *pt = vm->page_tables[pt_idx].bo; |
unsigned nptes; |
uint64_t pte; |
int r; |
radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj); |
radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true); |
r = reservation_object_reserve_shared(pt->tbo.resv); |
if (r) |
return r; |
if ((addr & ~mask) == (end & ~mask)) |
nptes = end - addr; |
855,9 → 856,36 |
last_pte + 8 * count, |
last_dst, flags); |
} |
return 0; |
} |
/** |
* radeon_vm_fence_pts - fence page tables after an update |
* |
* @vm: requested vm |
* @start: start of GPU address range |
* @end: end of GPU address range |
* @fence: fence to use |
* |
* Fence the page tables in the range @start - @end (cayman+). |
* |
* Global and local mutex must be locked! |
*/ |
static void radeon_vm_fence_pts(struct radeon_vm *vm, |
uint64_t start, uint64_t end, |
struct radeon_fence *fence) |
{ |
unsigned i; |
start >>= radeon_vm_block_size; |
end >>= radeon_vm_block_size; |
for (i = start; i <= end; ++i) |
radeon_bo_fence(vm->page_tables[i].bo, fence, true); |
} |
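The two shifts convert GPU page numbers into page-table indices; a hedged worked example, assuming radeon_vm_block_size == 9 (512 PTEs per table): |
/* Editor's note, not part of the diff: |
 *   start = 0x0000 >> 9 == 0,  end = 0x05ff >> 9 == 2, |
 * so tables 0, 1 and 2 each get the fence added as shared. */ |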
/** |
* radeon_vm_bo_update - map a bo into the vm page table |
* |
* @rdev: radeon_device pointer |
887,11 → 915,16 |
return -EINVAL; |
} |
spin_lock(&vm->status_lock); |
list_del_init(&bo_va->vm_status); |
spin_unlock(&vm->status_lock); |
bo_va->flags &= ~RADEON_VM_PAGE_VALID; |
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; |
bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; |
// if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) |
// bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; |
if (mem) { |
addr = mem->start << PAGE_SHIFT; |
if (mem->mem_type != TTM_PL_SYSTEM) { |
953,23 → 986,34 |
return r; |
ib.length_dw = 0; |
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, |
if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) { |
unsigned i; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) |
radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use); |
} |
r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, |
bo_va->it.last + 1, addr, |
radeon_vm_page_flags(bo_va->flags)); |
if (r) { |
radeon_ib_free(rdev, &ib); |
return r; |
} |
radeon_asic_vm_pad_ib(rdev, &ib); |
WARN_ON(ib.length_dw > ndw); |
radeon_semaphore_sync_to(ib.semaphore, vm->fence); |
r = radeon_ib_schedule(rdev, &ib, NULL, false); |
if (r) { |
radeon_ib_free(rdev, &ib); |
return r; |
} |
radeon_fence_unref(&vm->fence); |
vm->fence = radeon_fence_ref(ib.fence); |
ib.fence->is_vm_update = true; |
radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence); |
radeon_fence_unref(&bo_va->last_pt_update); |
bo_va->last_pt_update = radeon_fence_ref(ib.fence); |
radeon_ib_free(rdev, &ib); |
radeon_fence_unref(&vm->last_flush); |
return 0; |
} |
988,16 → 1032,25 |
int radeon_vm_clear_freed(struct radeon_device *rdev, |
struct radeon_vm *vm) |
{ |
struct radeon_bo_va *bo_va, *tmp; |
struct radeon_bo_va *bo_va; |
int r; |
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { |
spin_lock(&vm->status_lock); |
while (!list_empty(&vm->freed)) { |
bo_va = list_first_entry(&vm->freed, |
struct radeon_bo_va, vm_status); |
spin_unlock(&vm->status_lock); |
r = radeon_vm_bo_update(rdev, bo_va, NULL); |
radeon_bo_unref(&bo_va->bo); |
radeon_fence_unref(&bo_va->last_pt_update); |
kfree(bo_va); |
if (r) |
return r; |
spin_lock(&vm->status_lock); |
} |
spin_unlock(&vm->status_lock); |
return 0; |
} |
1016,14 → 1069,23 |
int radeon_vm_clear_invalids(struct radeon_device *rdev, |
struct radeon_vm *vm) |
{ |
struct radeon_bo_va *bo_va, *tmp; |
struct radeon_bo_va *bo_va; |
int r; |
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) { |
spin_lock(&vm->status_lock); |
while (!list_empty(&vm->invalidated)) { |
bo_va = list_first_entry(&vm->invalidated, |
struct radeon_bo_va, vm_status); |
spin_unlock(&vm->status_lock); |
r = radeon_vm_bo_update(rdev, bo_va, NULL); |
if (r) |
return r; |
spin_lock(&vm->status_lock); |
} |
spin_unlock(&vm->status_lock); |
return 0; |
} |
1046,6 → 1108,7 |
mutex_lock(&vm->mutex); |
interval_tree_remove(&bo_va->it, &vm->va); |
spin_lock(&vm->status_lock); |
list_del(&bo_va->vm_status); |
if (bo_va->addr) { |
1052,8 → 1115,10 |
bo_va->bo = radeon_bo_ref(bo_va->bo); |
list_add(&bo_va->vm_status, &vm->freed); |
} else { |
radeon_fence_unref(&bo_va->last_pt_update); |
kfree(bo_va); |
} |
spin_unlock(&vm->status_lock); |
mutex_unlock(&vm->mutex); |
} |
1074,10 → 1139,10 |
list_for_each_entry(bo_va, &bo->va, bo_list) { |
if (bo_va->addr) { |
mutex_lock(&bo_va->vm->mutex); |
spin_lock(&bo_va->vm->status_lock); |
list_del(&bo_va->vm_status); |
list_add(&bo_va->vm_status, &bo_va->vm->invalidated); |
mutex_unlock(&bo_va->vm->mutex); |
spin_unlock(&bo_va->vm->status_lock); |
} |
} |
} |
1095,15 → 1160,17 |
const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE, |
RADEON_VM_PTE_COUNT * 8); |
unsigned pd_size, pd_entries, pts_size; |
int r; |
int i, r; |
vm->id = 0; |
vm->ib_bo_va = NULL; |
vm->fence = NULL; |
vm->last_flush = NULL; |
vm->last_id_use = NULL; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
vm->ids[i].id = 0; |
vm->ids[i].flushed_updates = NULL; |
vm->ids[i].last_id_use = NULL; |
} |
mutex_init(&vm->mutex); |
vm->va = RB_ROOT; |
spin_lock_init(&vm->status_lock); |
INIT_LIST_HEAD(&vm->invalidated); |
INIT_LIST_HEAD(&vm->freed); |
1120,7 → 1187,7 |
r = radeon_bo_create(rdev, pd_size, align, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, |
&vm->page_directory); |
NULL, &vm->page_directory); |
if (r) |
return r; |
1157,11 → 1224,13 |
if (!r) { |
list_del_init(&bo_va->bo_list); |
radeon_bo_unreserve(bo_va->bo); |
radeon_fence_unref(&bo_va->last_pt_update); |
kfree(bo_va); |
} |
} |
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { |
radeon_bo_unref(&bo_va->bo); |
radeon_fence_unref(&bo_va->last_pt_update); |
kfree(bo_va); |
} |
1171,9 → 1240,10 |
radeon_bo_unref(&vm->page_directory); |
radeon_fence_unref(&vm->fence); |
radeon_fence_unref(&vm->last_flush); |
radeon_fence_unref(&vm->last_id_use); |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
radeon_fence_unref(&vm->ids[i].flushed_updates); |
radeon_fence_unref(&vm->ids[i].last_id_use); |
} |
mutex_destroy(&vm->mutex); |
} |
/drivers/video/drm/radeon/rdisplay.c |
---|
1,8 → 1,6 |
#include <drm/drmP.h> |
#include <drm/radeon_drm.h> |
#include <drm.h> |
#include <drm_mm.h> |
#include "radeon.h" |
#include "radeon_object.h" |
#include "bitmap.h" |
34,7 → 32,7 |
rdev = (struct radeon_device *)os_display->ddev->dev_private; |
r = radeon_bo_create(rdev, CURSOR_WIDTH*CURSOR_HEIGHT*4, |
PAGE_SIZE, false, RADEON_GEM_DOMAIN_VRAM, 0, NULL, &cursor->robj); |
4096, false, RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &cursor->robj); |
if (unlikely(r != 0)) |
return r; |
229,7 → 227,7 |
cursor_t *cursor; |
bool retval = true; |
u32_t ifl; |
u32 ifl; |
ENTER(); |
/drivers/video/drm/radeon/rdisplay_kms.c |
---|
1,8 → 1,6 |
#include <drm/drmP.h> |
#include <drm/radeon_drm.h> |
#include <drm.h> |
#include <drm_mm.h> |
#include "radeon.h" |
#include "radeon_object.h" |
#include "drm_fb_helper.h" |
407,7 → 405,7 |
struct drm_framebuffer *fb; |
cursor_t *cursor; |
u32_t ifl; |
u32 ifl; |
int ret; |
mutex_lock(&dev->mode_config.mutex); |
/drivers/video/drm/radeon/rs600.c |
---|
840,6 → 840,9 |
u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; |
/* FIXME: implement full support */ |
if (!rdev->mode_info.mode_config_initialized) |
return; |
radeon_update_display_priority(rdev); |
if (rdev->mode_info.crtcs[0]->base.enabled) |
/drivers/video/drm/radeon/rs690.c |
---|
579,6 → 579,9 |
u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; |
u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; |
if (!rdev->mode_info.mode_config_initialized) |
return; |
radeon_update_display_priority(rdev); |
if (rdev->mode_info.crtcs[0]->base.enabled) |
/drivers/video/drm/radeon/rs780_dpm.c |
---|
24,6 → 24,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "rs780d.h" |
#include "r600_dpm.h" |
#include "rs780_dpm.h" |
/drivers/video/drm/radeon/rv515.c |
---|
1214,6 → 1214,9 |
struct drm_display_mode *mode0 = NULL; |
struct drm_display_mode *mode1 = NULL; |
if (!rdev->mode_info.mode_config_initialized) |
return; |
radeon_update_display_priority(rdev); |
if (rdev->mode_info.crtcs[0]->base.enabled) |
/drivers/video/drm/radeon/rv6xx_dpm.c |
---|
24,6 → 24,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "rv6xxd.h" |
#include "r600_dpm.h" |
#include "rv6xx_dpm.h" |
/drivers/video/drm/radeon/rv770.c |
---|
26,7 → 26,6 |
* Jerome Glisse |
*/ |
#include <linux/firmware.h> |
//#include <linux/platform_device.h> |
#include <linux/slab.h> |
#include <drm/drmP.h> |
#include "radeon.h" |
/drivers/video/drm/radeon/rv770_dma.c |
---|
33,18 → 33,19 |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* @resv: reservation object to sync to |
* |
* Copy GPU paging using the DMA engine (r7xx). |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int rv770_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_fence *fence; |
struct radeon_sync sync; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_dw, cur_size_in_dw; |
51,11 → 52,7 |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
radeon_sync_create(&sync); |
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; |
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF); |
62,12 → 59,12 |
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
radeon_sync_resv(rdev, &sync, resv, false); |
radeon_sync_rings(rdev, &sync, ring->idx); |
for (i = 0; i < num_loops; i++) { |
cur_size_in_dw = size_in_dw; |
83,15 → 80,15 |
dst_offset += cur_size_in_dw * 4; |
} |
r = radeon_fence_emit(rdev, fence, ring->idx); |
r = radeon_fence_emit(rdev, &fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
radeon_sync_free(rdev, &sync, fence); |
return r; |
return fence; |
} |
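Callers now receive the fence (or an ERR_PTR) instead of passing one in by reference; a hedged usage sketch matching the radeon_test.c hunks above: |
/* Editor's sketch, not part of the diff */ |
struct radeon_fence *fence; |
fence = rv770_copy_dma(rdev, src_offset, dst_offset, num_gpu_pages, resv); |
if (IS_ERR(fence)) |
return PTR_ERR(fence); |
r = radeon_fence_wait(fence, false); |
radeon_fence_unref(&fence); |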
/drivers/video/drm/radeon/rv770_dpm.c |
---|
24,6 → 24,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "rv770d.h" |
#include "r600_dpm.h" |
#include "rv770_dpm.h" |
/drivers/video/drm/radeon/si.c |
---|
2384,6 → 2384,9 |
u32 num_heads = 0, lb_size; |
int i; |
if (!rdev->mode_info.mode_config_initialized) |
return; |
radeon_update_display_priority(rdev); |
for (i = 0; i < rdev->num_crtc; i++) { |
3362,6 → 3365,7 |
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0; |
u32 header; |
if (ib->is_const_ib) { |
3397,14 → 3401,13 |
#endif |
(ib->gpu_addr & 0xFFFFFFFC)); |
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); |
radeon_ring_write(ring, ib->length_dw | |
(ib->vm ? (ib->vm->id << 24) : 0)); |
radeon_ring_write(ring, ib->length_dw | (vm_id << 24)); |
if (!ib->is_const_ib) { |
/* flush read cache over gart for this vmid */ |
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); |
radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); |
radeon_ring_write(ring, vm_id); |
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | |
PACKET3_TC_ACTION_ENA | |
4684,7 → 4687,7 |
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
int ret = 0; |
u32 idx = 0; |
u32 idx = 0, i; |
struct radeon_cs_packet pkt; |
do { |
4695,6 → 4698,12 |
switch (pkt.type) { |
case RADEON_PACKET_TYPE0: |
dev_err(rdev->dev, "Packet0 not allowed!\n"); |
for (i = 0; i < ib->length_dw; i++) { |
if (i == idx) |
printk("\t0x%08x <---\n", ib->ptr[i]); |
else |
printk("\t0x%08x\n", ib->ptr[i]); |
} |
ret = -EINVAL; |
break; |
case RADEON_PACKET_TYPE2: |
5014,27 → 5023,23 |
block, mc_id); |
} |
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
if (vm == NULL) |
return; |
/* write new base address */ |
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | |
WRITE_DATA_DST_SEL(0))); |
if (vm->id < 8) { |
if (vm_id < 8) { |
radeon_ring_write(ring, |
(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); |
(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2); |
} else { |
radeon_ring_write(ring, |
(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); |
(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2); |
} |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, vm->pd_gpu_addr >> 12); |
radeon_ring_write(ring, pd_addr >> 12); |
/* flush hdp cache */ |
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
5050,7 → 5055,7 |
WRITE_DATA_DST_SEL(0))); |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 1 << vm->id); |
radeon_ring_write(ring, 1 << vm_id); |
/* sync PFP to ME, otherwise we might get invalid PFP reads */ |
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
/drivers/video/drm/radeon/si_dma.c |
---|
185,20 → 185,17 |
} |
} |
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
if (vm == NULL) |
return; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); |
if (vm->id < 8) { |
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); |
if (vm_id < 8) { |
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2)); |
} else { |
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2)); |
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2)); |
} |
radeon_ring_write(ring, vm->pd_gpu_addr >> 12); |
radeon_ring_write(ring, pd_addr >> 12); |
/* flush hdp cache */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); |
208,7 → 205,7 |
/* bits 0-7 are the VM contexts0-7 */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); |
radeon_ring_write(ring, 1 << vm->id); |
radeon_ring_write(ring, 1 << vm_id); |
} |
/** |
218,18 → 215,19 |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* @resv: reservation object to sync to |
* |
* Copy GPU paging using the DMA engine (SI). |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int si_copy_dma(struct radeon_device *rdev, |
struct radeon_fence *si_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
struct reservation_object *resv) |
{ |
struct radeon_semaphore *sem = NULL; |
struct radeon_fence *fence; |
struct radeon_sync sync; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_bytes, cur_size_in_bytes; |
236,11 → 234,7 |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
radeon_sync_create(&sync); |
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff); |
247,12 → 241,12 |
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_semaphore_sync_to(sem, *fence); |
radeon_semaphore_sync_rings(rdev, sem, ring->idx); |
radeon_sync_resv(rdev, &sync, resv, false); |
radeon_sync_rings(rdev, &sync, ring->idx); |
for (i = 0; i < num_loops; i++) { |
cur_size_in_bytes = size_in_bytes; |
268,16 → 262,16 |
dst_offset += cur_size_in_bytes; |
} |
r = radeon_fence_emit(rdev, fence, ring->idx); |
r = radeon_fence_emit(rdev, &fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
radeon_sync_free(rdev, &sync, NULL); |
return ERR_PTR(r); |
} |
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_semaphore_free(rdev, &sem, *fence); |
radeon_sync_free(rdev, &sync, fence); |
return r; |
return fence; |
} |
/drivers/video/drm/radeon/si_dpm.c |
---|
23,6 → 23,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "sid.h" |
#include "r600_dpm.h" |
#include "si_dpm.h" |
3397,6 → 3398,15 |
ret = si_read_smc_sram_dword(rdev, |
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + |
SISLANDS_SMC_FIRMWARE_HEADER_fanTable, |
&tmp, si_pi->sram_end); |
if (ret) |
return ret; |
si_pi->fan_table_start = tmp; |
ret = si_read_smc_sram_dword(rdev, |
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + |
SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable, |
&tmp, si_pi->sram_end); |
if (ret) |
5816,7 → 5826,32 |
si_enable_acpi_power_management(rdev); |
} |
static int si_set_thermal_temperature_range(struct radeon_device *rdev, |
static int si_thermal_enable_alert(struct radeon_device *rdev, |
bool enable) |
{ |
u32 thermal_int = RREG32(CG_THERMAL_INT); |
if (enable) { |
PPSMC_Result result; |
thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); |
WREG32(CG_THERMAL_INT, thermal_int); |
rdev->irq.dpm_thermal = false; |
result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); |
if (result != PPSMC_Result_OK) { |
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); |
return -EINVAL; |
} |
} else { |
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; |
WREG32(CG_THERMAL_INT, thermal_int); |
rdev->irq.dpm_thermal = true; |
} |
return 0; |
} |
static int si_thermal_set_temperature_range(struct radeon_device *rdev, |
int min_temp, int max_temp) |
{ |
int low_temp = 0 * 1000; |
5841,6 → 5876,309 |
return 0; |
} |
static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode) |
{ |
struct si_power_info *si_pi = si_get_pi(rdev); |
u32 tmp; |
if (si_pi->fan_ctrl_is_in_default_mode) { |
tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; |
si_pi->fan_ctrl_default_mode = tmp; |
tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; |
si_pi->t_min = tmp; |
si_pi->fan_ctrl_is_in_default_mode = false; |
} |
tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; |
tmp |= TMIN(0); |
WREG32(CG_FDO_CTRL2, tmp); |
tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; |
tmp |= FDO_PWM_MODE(mode); |
WREG32(CG_FDO_CTRL2, tmp); |
} |
static int si_thermal_setup_fan_table(struct radeon_device *rdev) |
{ |
struct si_power_info *si_pi = si_get_pi(rdev); |
PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE }; |
u32 duty100; |
u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; |
u16 fdo_min, slope1, slope2; |
u32 reference_clock, tmp; |
int ret; |
u64 tmp64; |
if (!si_pi->fan_table_start) { |
rdev->pm.dpm.fan.ucode_fan_control = false; |
return 0; |
} |
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; |
if (duty100 == 0) { |
rdev->pm.dpm.fan.ucode_fan_control = false; |
return 0; |
} |
tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100; |
do_div(tmp64, 10000); |
fdo_min = (u16)tmp64; |
t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min; |
t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med; |
pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min; |
pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med; |
slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); |
slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); |
fan_table.slope1 = cpu_to_be16(slope1); |
fan_table.slope2 = cpu_to_be16(slope2); |
fan_table.fdo_min = cpu_to_be16(fdo_min); |
fan_table.hys_down = cpu_to_be16(rdev->pm.dpm.fan.t_hyst); |
fan_table.hys_up = cpu_to_be16(1); |
fan_table.hys_slope = cpu_to_be16(1); |
fan_table.temp_resp_lim = cpu_to_be16(5); |
reference_clock = radeon_get_xclk(rdev); |
fan_table.refresh_period = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay * |
reference_clock) / 1600); |
fan_table.fdo_max = cpu_to_be16((u16)duty100); |
tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; |
fan_table.temp_src = (uint8_t)tmp; |
ret = si_copy_bytes_to_smc(rdev, |
si_pi->fan_table_start, |
(u8 *)(&fan_table), |
sizeof(fan_table), |
si_pi->sram_end); |
if (ret) { |
DRM_ERROR("Failed to load fan table to the SMC."); |
rdev->pm.dpm.fan.ucode_fan_control = false; |
} |
return 0; |
} |
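The table math is fixed point: the fan.pwm_* and fan.t_* inputs are in 0.01% and (presumably) 0.01 degC units, hence the divide by 10000 for fdo_min and the +50 rounding term before the divide by 100 in the slopes. A hedged worked example: |
/* Editor's note, not part of the diff.  Assume duty100 = 255, |
 * pwm_min = 1000 (10.00%), pwm_med = 4000 (40.00%), |
 * t_min = 3000 (30.00 C), t_med = 6000 (60.00 C): |
 *   fdo_min = 1000 * 255 / 10000                    = 25  (~10% duty) |
 *   slope1  = (50 + (16 * 255 * 3000) / 3000) / 100 = 41            */ |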
static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev) |
{ |
PPSMC_Result ret; |
ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl); |
if (ret == PPSMC_Result_OK) |
return 0; |
else |
return -EINVAL; |
} |
static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev) |
{ |
PPSMC_Result ret; |
ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl); |
if (ret == PPSMC_Result_OK) |
return 0; |
else |
return -EINVAL; |
} |
#if 0 |
static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, |
u32 *speed) |
{ |
u32 duty, duty100; |
u64 tmp64; |
if (rdev->pm.no_fan) |
return -ENOENT; |
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; |
duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; |
if (duty100 == 0) |
return -EINVAL; |
tmp64 = (u64)duty * 100; |
do_div(tmp64, duty100); |
*speed = (u32)tmp64; |
if (*speed > 100) |
*speed = 100; |
return 0; |
} |
static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, |
u32 speed) |
{ |
u32 tmp; |
u32 duty, duty100; |
u64 tmp64; |
if (rdev->pm.no_fan) |
return -ENOENT; |
if (speed > 100) |
return -EINVAL; |
if (rdev->pm.dpm.fan.ucode_fan_control) |
si_fan_ctrl_stop_smc_fan_control(rdev); |
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; |
if (duty100 == 0) |
return -EINVAL; |
tmp64 = (u64)speed * duty100; |
do_div(tmp64, 100); |
duty = (u32)tmp64; |
tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; |
tmp |= FDO_STATIC_DUTY(duty); |
WREG32(CG_FDO_CTRL0, tmp); |
si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); |
return 0; |
} |
static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev, |
u32 *speed) |
{ |
u32 tach_period; |
u32 xclk = radeon_get_xclk(rdev); |
if (rdev->pm.no_fan) |
return -ENOENT; |
if (rdev->pm.fan_pulses_per_revolution == 0) |
return -ENOENT; |
tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; |
if (tach_period == 0) |
return -ENOENT; |
*speed = 60 * xclk * 10000 / tach_period; |
return 0; |
} |
static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev, |
u32 speed) |
{ |
u32 tach_period, tmp; |
u32 xclk = radeon_get_xclk(rdev); |
if (rdev->pm.no_fan) |
return -ENOENT; |
if (rdev->pm.fan_pulses_per_revolution == 0) |
return -ENOENT; |
if ((speed < rdev->pm.fan_min_rpm) || |
(speed > rdev->pm.fan_max_rpm)) |
return -EINVAL; |
if (rdev->pm.dpm.fan.ucode_fan_control) |
si_fan_ctrl_stop_smc_fan_control(rdev); |
tach_period = 60 * xclk * 10000 / (8 * speed); |
tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; |
tmp |= TARGET_PERIOD(tach_period); |
WREG32(CG_TACH_CTRL, tmp); |
si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM); |
return 0; |
} |
#endif |
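The compiled-out get/set RPM pair invert each other up to a factor of 8 (TARGET_PERIOD in CG_TACH_CTRL versus TACH_PERIOD in CG_TACH_STATUS), presumably folding in the tach edges per revolution: |
/* Editor's note, not part of the diff: |
 *   get: speed       = 60 * xclk * 10000 / tach_period |
 *   set: tach_period = 60 * xclk * 10000 / (8 * speed)  */ |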
static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev) |
{ |
struct si_power_info *si_pi = si_get_pi(rdev); |
u32 tmp; |
if (!si_pi->fan_ctrl_is_in_default_mode) { |
tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; |
tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode); |
WREG32(CG_FDO_CTRL2, tmp); |
tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; |
tmp |= TMIN(si_pi->t_min); |
WREG32(CG_FDO_CTRL2, tmp); |
si_pi->fan_ctrl_is_in_default_mode = true; |
} |
} |
static void si_thermal_start_smc_fan_control(struct radeon_device *rdev) |
{ |
if (rdev->pm.dpm.fan.ucode_fan_control) { |
si_fan_ctrl_start_smc_fan_control(rdev); |
si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); |
} |
} |
static void si_thermal_initialize(struct radeon_device *rdev) |
{ |
u32 tmp; |
if (rdev->pm.fan_pulses_per_revolution) { |
tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; |
tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1); |
WREG32(CG_TACH_CTRL, tmp); |
} |
tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; |
tmp |= TACH_PWM_RESP_RATE(0x28); |
WREG32(CG_FDO_CTRL2, tmp); |
} |
static int si_thermal_start_thermal_controller(struct radeon_device *rdev) |
{ |
int ret; |
si_thermal_initialize(rdev); |
ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
if (ret) |
return ret; |
ret = si_thermal_enable_alert(rdev, true); |
if (ret) |
return ret; |
if (rdev->pm.dpm.fan.ucode_fan_control) { |
ret = si_halt_smc(rdev); |
if (ret) |
return ret; |
ret = si_thermal_setup_fan_table(rdev); |
if (ret) |
return ret; |
ret = si_resume_smc(rdev); |
if (ret) |
return ret; |
si_thermal_start_smc_fan_control(rdev); |
} |
return 0; |
} |
static void si_thermal_stop_thermal_controller(struct radeon_device *rdev) |
{ |
if (!rdev->pm.no_fan) { |
si_fan_ctrl_set_default_mode(rdev); |
si_fan_ctrl_stop_smc_fan_control(rdev); |
} |
} |
int si_dpm_enable(struct radeon_device *rdev) |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
5953,31 → 6291,39 |
si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); |
si_thermal_start_thermal_controller(rdev); |
ni_update_current_ps(rdev, boot_ps); |
return 0; |
} |
int si_dpm_late_enable(struct radeon_device *rdev) |
{ |
int ret; |
if (rdev->irq.installed && |
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { |
PPSMC_Result result; |
ret = si_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
if (ret) |
return ret; |
rdev->irq.dpm_thermal = true; |
radeon_irq_set(rdev); |
result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt); |
if (result != PPSMC_Result_OK) |
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); |
} |
return 0; |
} |
static int si_set_temperature_range(struct radeon_device *rdev) |
{ |
int ret; |
ret = si_thermal_enable_alert(rdev, false); |
if (ret) |
return ret; |
ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); |
if (ret) |
return ret; |
ret = si_thermal_enable_alert(rdev, true); |
if (ret) |
return ret; |
return ret; |
} |
int si_dpm_late_enable(struct radeon_device *rdev) |
{ |
int ret; |
ret = si_set_temperature_range(rdev); |
if (ret) |
return ret; |
return ret; |
} |
void si_dpm_disable(struct radeon_device *rdev) |
5987,6 → 6333,7 |
if (!si_is_smc_running(rdev)) |
return; |
si_thermal_stop_thermal_controller(rdev); |
si_disable_ulv(rdev); |
si_clear_vc(rdev); |
if (pi->thermal_protection) |
6525,6 → 6872,9 |
rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = |
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
si_pi->fan_ctrl_is_in_default_mode = true; |
rdev->pm.dpm.fan.ucode_fan_control = false; |
return 0; |
} |
/drivers/video/drm/radeon/si_dpm.h |
---|
182,6 → 182,7 |
u32 dte_table_start; |
u32 spll_table_start; |
u32 papm_cfg_table_start; |
u32 fan_table_start; |
/* CAC stuff */ |
const struct si_cac_config_reg *cac_weights; |
const struct si_cac_config_reg *lcac_config; |
197,6 → 198,10 |
/* SVI2 */ |
u8 svd_gpio_id; |
u8 svc_gpio_id; |
/* fan control */ |
bool fan_ctrl_is_in_default_mode; |
u32 t_min; |
u32 fan_ctrl_default_mode; |
}; |
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0 |
/drivers/video/drm/radeon/si_smc.c |
---|
135,7 → 135,7 |
int si_program_jump_on_start(struct radeon_device *rdev) |
{ |
static u8 data[] = { 0x0E, 0x00, 0x40, 0x40 }; |
static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 }; |
return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1); |
} |
/drivers/video/drm/radeon/sid.h |
---|
180,7 → 180,10 |
#define DIG_THERM_DPM(x) ((x) << 14) |
#define DIG_THERM_DPM_MASK 0x003FC000 |
#define DIG_THERM_DPM_SHIFT 14 |
#define CG_THERMAL_STATUS 0x704 |
#define FDO_PWM_DUTY(x) ((x) << 9) |
#define FDO_PWM_DUTY_MASK (0xff << 9) |
#define FDO_PWM_DUTY_SHIFT 9 |
#define CG_THERMAL_INT 0x708 |
#define DIG_THERM_INTH(x) ((x) << 8) |
#define DIG_THERM_INTH_MASK 0x0000FF00 |
191,6 → 194,10 |
#define THERM_INT_MASK_HIGH (1 << 24) |
#define THERM_INT_MASK_LOW (1 << 25) |
#define CG_MULT_THERMAL_CTRL 0x710 |
#define TEMP_SEL(x) ((x) << 20) |
#define TEMP_SEL_MASK (0xff << 20) |
#define TEMP_SEL_SHIFT 20 |
#define CG_MULT_THERMAL_STATUS 0x714 |
#define ASIC_MAX_TEMP(x) ((x) << 0) |
#define ASIC_MAX_TEMP_MASK 0x000001ff |
199,6 → 206,37 |
#define CTF_TEMP_MASK 0x0003fe00 |
#define CTF_TEMP_SHIFT 9 |
#define CG_FDO_CTRL0 0x754 |
#define FDO_STATIC_DUTY(x) ((x) << 0) |
#define FDO_STATIC_DUTY_MASK 0x000000FF |
#define FDO_STATIC_DUTY_SHIFT 0 |
#define CG_FDO_CTRL1 0x758 |
#define FMAX_DUTY100(x) ((x) << 0) |
#define FMAX_DUTY100_MASK 0x000000FF |
#define FMAX_DUTY100_SHIFT 0 |
#define CG_FDO_CTRL2 0x75C |
#define TMIN(x) ((x) << 0) |
#define TMIN_MASK 0x000000FF |
#define TMIN_SHIFT 0 |
#define FDO_PWM_MODE(x) ((x) << 11) |
#define FDO_PWM_MODE_MASK (7 << 11) |
#define FDO_PWM_MODE_SHIFT 11 |
#define TACH_PWM_RESP_RATE(x) ((x) << 25) |
#define TACH_PWM_RESP_RATE_MASK (0x7f << 25) |
#define TACH_PWM_RESP_RATE_SHIFT 25 |
#define CG_TACH_CTRL 0x770 |
# define EDGE_PER_REV(x) ((x) << 0) |
# define EDGE_PER_REV_MASK (0x7 << 0) |
# define EDGE_PER_REV_SHIFT 0 |
# define TARGET_PERIOD(x) ((x) << 3) |
# define TARGET_PERIOD_MASK 0xfffffff8 |
# define TARGET_PERIOD_SHIFT 3 |
#define CG_TACH_STATUS 0x774 |
# define TACH_PERIOD(x) ((x) << 0) |
# define TACH_PERIOD_MASK 0xffffffff |
# define TACH_PERIOD_SHIFT 0 |
#define GENERAL_PWRMGT 0x780 |
# define GLOBAL_PWRMGT_EN (1 << 0) |
# define STATIC_PM_EN (1 << 1) |
736,7 → 774,7 |
# define DESCRIPTION16(x) (((x) & 0xff) << 0) |
# define DESCRIPTION17(x) (((x) & 0xff) << 8) |
#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54 |
#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54 |
# define AUDIO_ENABLED (1 << 31) |
#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56 |
/drivers/video/drm/radeon/sislands_smc.h |
---|
245,6 → 245,31 |
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c |
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120 |
struct PP_SIslands_FanTable |
{ |
uint8_t fdo_mode; |
uint8_t padding; |
int16_t temp_min; |
int16_t temp_med; |
int16_t temp_max; |
int16_t slope1; |
int16_t slope2; |
int16_t fdo_min; |
int16_t hys_up; |
int16_t hys_down; |
int16_t hys_slope; |
int16_t temp_resp_lim; |
int16_t temp_curr; |
int16_t slope_curr; |
int16_t pwm_curr; |
uint32_t refresh_period; |
int16_t fdo_max; |
uint8_t temp_src; |
int8_t padding2; |
}; |
typedef struct PP_SIslands_FanTable PP_SIslands_FanTable; |
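/* Editor's note, not part of the diff: every multi-byte field above is |
 * big-endian on the SMC side -- si_thermal_setup_fan_table() converts |
 * with cpu_to_be16()/cpu_to_be32() before si_copy_bytes_to_smc().     */ |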
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 |
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32 |
/drivers/video/drm/radeon/smu7_discrete.h |
---|
431,6 → 431,31 |
typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters; |
struct SMU7_Discrete_FanTable |
{ |
uint16_t FdoMode; |
int16_t TempMin; |
int16_t TempMed; |
int16_t TempMax; |
int16_t Slope1; |
int16_t Slope2; |
int16_t FdoMin; |
int16_t HystUp; |
int16_t HystDown; |
int16_t HystSlope; |
int16_t TempRespLim; |
int16_t TempCurr; |
int16_t SlopeCurr; |
int16_t PwmCurr; |
uint32_t RefreshPeriod; |
int16_t FdoMax; |
uint8_t TempSrc; |
int8_t Padding; |
}; |
typedef struct SMU7_Discrete_FanTable SMU7_Discrete_FanTable; |
struct SMU7_Discrete_PmFuses { |
// dw0-dw1 |
uint8_t BapmVddCVidHiSidd[8]; |
462,7 → 487,10 |
uint8_t BapmVddCVidHiSidd2[8]; |
// dw11-dw12 |
uint32_t Reserved6[2]; |
int16_t FuzzyFan_ErrorSetDelta; |
int16_t FuzzyFan_ErrorRateSetDelta; |
int16_t FuzzyFan_PwmSetDelta; |
uint16_t CalcMeasPowerBlend; |
// dw13-dw16 |
uint8_t GnbLPML[16]; |
/drivers/video/drm/radeon/sumo_dpm.c |
---|
23,6 → 23,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "sumod.h" |
#include "r600_dpm.h" |
#include "cypress_dpm.h" |
/drivers/video/drm/radeon/trinity_dpm.c |
---|
23,6 → 23,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "trinityd.h" |
#include "r600_dpm.h" |
#include "trinity_dpm.h" |
/drivers/video/drm/radeon/utils.c |
---|
1,8 → 1,8 |
#include <ddk.h> |
#include <linux/mm.h> |
#include <linux/err.h> |
#include <drm/drmP.h> |
#include <linux/hdmi.h> |
#include "radeon.h" |
int x86_clflush_size; |
unsigned int tsc_khz; |
12,7 → 12,7 |
struct file *filep; |
int count; |
filep = malloc(sizeof(*filep)); |
filep = __builtin_malloc(sizeof(*filep)); |
if(unlikely(filep == NULL)) |
return ERR_PTR(-ENOMEM); |
159,7 → 159,6 |
} |
//const char hex_asc[] = "0123456789abcdef"; |
/** |
378,45 → 377,93 |
buf, len, true); |
} |
void msleep(unsigned int msecs)
{
    msecs /= 10;
    if(!msecs) msecs = 1;

    __asm__ __volatile__ (
    "call *__imp__Delay"
    ::"b" (msecs));
    __asm__ __volatile__ (
    "":::"ebx");
};

static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
    /* ecx is often an input as well as an output. */
    asm volatile("cpuid"
        : "=a" (*eax),
          "=b" (*ebx),
          "=c" (*ecx),
          "=d" (*edx)
        : "0" (*eax), "2" (*ecx)
        : "memory");
}

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
    asm volatile(
        "    test %0,%0    \n"
        "    jz 3f         \n"
        "    jmp 1f        \n"
        ".align 16         \n"
        "1:  jmp 2f        \n"
        ".align 16         \n"
        "2:  dec %0        \n"
        "    jnz 2b        \n"
        "3:  dec %0        \n"
        : /* we don't need output */
        : "a" (loops)
    );
}

static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
    *eax = op;
    *ecx = 0;
    __cpuid(eax, ebx, ecx, edx);
}

static void (*delay_fn)(unsigned long) = delay_loop;

void __delay(unsigned long loops)
{
    delay_fn(loops);
}

void cpu_detect()
{
    u32 junk, tfms, cap0, misc;

    cpuid(0x00000001, &tfms, &misc, &junk, &cap0);

    if (cap0 & (1<<19))
        x86_clflush_size = ((misc >> 8) & 0xff) * 8;

    tsc_khz = GetCpuFreq()/1000;
}

inline void __const_udelay(unsigned long xloops)
{
    int d0;

    xloops *= 4;
    asm("mull %%edx"
        : "=d" (xloops), "=&a" (d0)
        : "1" (xloops), "0" (loops_per_jiffy * (HZ/4)));

    __delay(++xloops);
}

void __udelay(unsigned long usecs)
{
    __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}

unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
    w -= (w >> 1) & 0x55555555;
    w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
    w =  (w + (w >> 4)) & 0x0f0f0f0f;
    return (w * 0x01010101) >> 24;
#else
    unsigned int res = w - ((w >> 1) & 0x55555555);
    res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
    res = (res + (res >> 4)) & 0x0F0F0F0F;
    res = res + (res >> 8);
    return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(_sw_hweight32);

void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
EXPORT_SYMBOL(usleep_range);
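A quick check of the fixed-point arithmetic above: 0x000010c7 is 4295, i.e. 2^32/10^6 rounded up. After the xloops *= 4, the mull multiplies usecs * 4 * 2^32/10^6 by loops_per_jiffy * (HZ/4) and keeps only the high 32 bits of the 64-bit product (%edx), which collapses to usecs * loops_per_jiffy * HZ / 10^6: exactly the number of delay_loop iterations spanning the requested microseconds, with the division by 10^6 folded into the constant instead of done at runtime.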
void *kmemdup(const void *src, size_t len, gfp_t gfp) |
{ |
void *p; |
427,26 → 474,504 |
return p; |
} |
void cpu_detect1()
{
    u32 junk, tfms, cap0, misc;
    int i;

    cpuid(0x00000001, &tfms, &misc, &junk, &cap0);

    if (cap0 & (1<<19))
    {
        x86_clflush_size = ((misc >> 8) & 0xff) * 8;
    }

#if 0
    cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
          (unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
    cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
          (unsigned int*)&cpuinfo.model_name[24], (unsigned int*)&cpuinfo.model_name[28]);
    cpuid(0x80000004, (unsigned int*)&cpuinfo.model_name[32], (unsigned int*)&cpuinfo.model_name[36],
          (unsigned int*)&cpuinfo.model_name[40], (unsigned int*)&cpuinfo.model_name[44]);
    printf("\n%s\n\n",cpuinfo.model_name);

    cpuinfo.def_mtrr = read_msr(MSR_MTRRdefType);
    cpuinfo.mtrr_cap = read_msr(IA32_MTRRCAP);

    printf("MSR_MTRRdefType %016llx\n\n", cpuinfo.def_mtrr);

    cpuinfo.var_mtrr_count = (u8_t)cpuinfo.mtrr_cap;

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    };

    unsigned int cr0, cr3, cr4, eflags;

    eflags = safe_cli();

    /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
    cr0 = read_cr0() | (1<<30);
    write_cr0(cr0);
    wbinvd();

    cr4 = read_cr4();
    write_cr4(cr4 & ~(1<<7));

    cr3 = read_cr3();
    write_cr3(cr3);

    /* Save MTRR state */
    rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Disable MTRRs, and set the default type to uncached */
    native_write_msr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
    wbinvd();

    i = 0;
    set_mtrr(i++,0,0x80000000>>12,MTRR_WB);
    set_mtrr(i++,0x80000000>>12,0x40000000>>12,MTRR_WB);
    set_mtrr(i++,0xC0000000>>12,0x20000000>>12,MTRR_WB);
    set_mtrr(i++,0xdb800000>>12,0x00800000>>12,MTRR_UC);
    set_mtrr(i++,0xdc000000>>12,0x04000000>>12,MTRR_UC);
    set_mtrr(i++,0xE0000000>>12,0x10000000>>12,MTRR_WC);

    for(; i < cpuinfo.var_mtrr_count; i++)
        set_mtrr(i,0,0,0);

    write_cr3(cr3);

    /* Intel (P6) standard MTRRs */
    native_write_msr(MSR_MTRRdefType, deftype_lo, deftype_hi);

    /* Enable caches */
    write_cr0(read_cr0() & ~(1<<30));

    /* Restore value of CR4 */
    write_cr4(cr4);
    safe_sti(eflags);

    printf("\nnew MTRR map\n\n");

    for(i = 0; i < cpuinfo.var_mtrr_count; i++)
    {
        u64_t mtrr_base;
        u64_t mtrr_mask;

        cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
        cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));

        printf("MTRR_%d base: %016llx mask: %016llx\n", i,
               cpuinfo.var_mtrr[i].base,
               cpuinfo.var_mtrr[i].mask);
    };
#endif

    tsc_khz = (unsigned int)(GetCpuFreq()/1000);
}

unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
    const unsigned long *p = addr;
    unsigned long result = 0;
    unsigned long tmp;

    while (size & ~(BITS_PER_LONG-1)) {
        if (~(tmp = *(p++)))
            goto found;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;

    tmp = (*p) | (~0UL << size);
    if (tmp == ~0UL)            /* Are any bits zero? */
        return result + size;   /* Nope. */
found:
    return result + ffz(tmp);
}
static atomic_t fence_context_counter = ATOMIC_INIT(0); |
/** |
* fence_context_alloc - allocate an array of fence contexts |
 * @num: [in] number of contexts to allocate
 *
 * Returns the first index of the newly allocated range of fence contexts.
* The fence context is used for setting fence->context to a unique number. |
*/ |
unsigned fence_context_alloc(unsigned num) |
{ |
BUG_ON(!num); |
return atomic_add_return(num, &fence_context_counter) - num; |
} |
EXPORT_SYMBOL(fence_context_alloc); |
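A sketch of the intended usage, following how the radeon fence code consumes this API (one context per ring):

/* at device init: reserve a block of contexts, one per hardware ring */
rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);

/* ring N's fences then carry context rdev->fence_context + N, so fences
 * from different rings are never treated as part of the same timeline */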
int fence_signal(struct fence *fence) |
{ |
unsigned long flags; |
if (!fence) |
return -EINVAL; |
// if (!ktime_to_ns(fence->timestamp)) { |
// fence->timestamp = ktime_get(); |
// smp_mb__before_atomic(); |
// } |
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
return -EINVAL; |
// trace_fence_signaled(fence); |
if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { |
struct fence_cb *cur, *tmp; |
spin_lock_irqsave(fence->lock, flags); |
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { |
list_del_init(&cur->node); |
cur->func(fence, cur); |
} |
spin_unlock_irqrestore(fence->lock, flags); |
} |
return 0; |
} |
EXPORT_SYMBOL(fence_signal); |
int fence_signal_locked(struct fence *fence) |
{ |
struct fence_cb *cur, *tmp; |
int ret = 0; |
if (WARN_ON(!fence)) |
return -EINVAL; |
// if (!ktime_to_ns(fence->timestamp)) { |
// fence->timestamp = ktime_get(); |
// smp_mb__before_atomic(); |
// } |
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
ret = -EINVAL; |
/* |
* we might have raced with the unlocked fence_signal, |
* still run through all callbacks |
*/ |
}// else |
// trace_fence_signaled(fence); |
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { |
list_del_init(&cur->node); |
cur->func(fence, cur); |
} |
return ret; |
} |
EXPORT_SYMBOL(fence_signal_locked); |
void fence_enable_sw_signaling(struct fence *fence) |
{ |
unsigned long flags; |
if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) && |
!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { |
// trace_fence_enable_signal(fence); |
spin_lock_irqsave(fence->lock, flags); |
if (!fence->ops->enable_signaling(fence)) |
fence_signal_locked(fence); |
spin_unlock_irqrestore(fence->lock, flags); |
} |
} |
EXPORT_SYMBOL(fence_enable_sw_signaling); |
signed long |
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout) |
{ |
signed long ret; |
if (WARN_ON(timeout < 0)) |
return -EINVAL; |
// trace_fence_wait_start(fence); |
ret = fence->ops->wait(fence, intr, timeout); |
// trace_fence_wait_end(fence); |
return ret; |
} |
EXPORT_SYMBOL(fence_wait_timeout); |
void fence_release(struct kref *kref) |
{ |
struct fence *fence = |
container_of(kref, struct fence, refcount); |
// trace_fence_destroy(fence); |
BUG_ON(!list_empty(&fence->cb_list)); |
if (fence->ops->release) |
fence->ops->release(fence); |
else |
fence_free(fence); |
} |
EXPORT_SYMBOL(fence_release); |
void fence_free(struct fence *fence) |
{ |
kfree_rcu(fence, rcu); |
} |
EXPORT_SYMBOL(fence_free); |
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj, |
struct fence *fence) |
{ |
u32 i; |
fence_get(fence); |
// preempt_disable(); |
write_seqcount_begin(&obj->seq); |
for (i = 0; i < fobj->shared_count; ++i) { |
struct fence *old_fence; |
old_fence = rcu_dereference_protected(fobj->shared[i], |
reservation_object_held(obj)); |
if (old_fence->context == fence->context) { |
/* memory barrier is added by write_seqcount_begin */ |
RCU_INIT_POINTER(fobj->shared[i], fence); |
write_seqcount_end(&obj->seq); |
//			preempt_enable();
fence_put(old_fence); |
return; |
} |
} |
/* |
* memory barrier is added by write_seqcount_begin, |
* fobj->shared_count is protected by this lock too |
*/ |
RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); |
fobj->shared_count++; |
write_seqcount_end(&obj->seq); |
// preempt_enable(); |
} |
static void |
reservation_object_add_shared_replace(struct reservation_object *obj, |
struct reservation_object_list *old, |
struct reservation_object_list *fobj, |
struct fence *fence) |
{ |
unsigned i; |
struct fence *old_fence = NULL; |
fence_get(fence); |
if (!old) { |
RCU_INIT_POINTER(fobj->shared[0], fence); |
fobj->shared_count = 1; |
goto done; |
} |
/* |
* no need to bump fence refcounts, rcu_read access |
* requires the use of kref_get_unless_zero, and the |
* references from the old struct are carried over to |
* the new. |
*/ |
fobj->shared_count = old->shared_count; |
for (i = 0; i < old->shared_count; ++i) { |
struct fence *check; |
check = rcu_dereference_protected(old->shared[i], |
reservation_object_held(obj)); |
if (!old_fence && check->context == fence->context) { |
old_fence = check; |
RCU_INIT_POINTER(fobj->shared[i], fence); |
} else |
RCU_INIT_POINTER(fobj->shared[i], check); |
} |
if (!old_fence) { |
RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence); |
fobj->shared_count++; |
} |
done: |
// preempt_disable(); |
write_seqcount_begin(&obj->seq); |
/* |
* RCU_INIT_POINTER can be used here, |
* seqcount provides the necessary barriers |
*/ |
RCU_INIT_POINTER(obj->fence, fobj); |
write_seqcount_end(&obj->seq); |
// preempt_enable(); |
if (old) |
kfree_rcu(old, rcu); |
if (old_fence) |
fence_put(old_fence); |
} |
int reservation_object_reserve_shared(struct reservation_object *obj) |
{ |
struct reservation_object_list *fobj, *old; |
u32 max; |
old = reservation_object_get_list(obj); |
if (old && old->shared_max) { |
if (old->shared_count < old->shared_max) { |
/* perform an in-place update */ |
kfree(obj->staged); |
obj->staged = NULL; |
return 0; |
} else |
max = old->shared_max * 2; |
} else |
max = 4; |
/* |
* resize obj->staged or allocate if it doesn't exist, |
* noop if already correct size |
*/ |
fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]), |
GFP_KERNEL); |
if (!fobj) |
return -ENOMEM; |
obj->staged = fobj; |
fobj->shared_max = max; |
return 0; |
} |
EXPORT_SYMBOL(reservation_object_reserve_shared); |
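The reserve/add split exists so the only fallible step (the krealloc) can run before the caller enters a no-fail section. Under the reservation object's ww_mutex the usual pattern is the following sketch, with error handling shortened:

/* called with the reservation object's ww_mutex held */
ret = reservation_object_reserve_shared(obj);
if (ret)
    return ret;                /* allocation failed, still safe to back out */

/* from here on, adding the fence cannot fail */
reservation_object_add_shared_fence(obj, fence);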
void reservation_object_add_shared_fence(struct reservation_object *obj, |
struct fence *fence) |
{ |
struct reservation_object_list *old, *fobj = obj->staged; |
old = reservation_object_get_list(obj); |
obj->staged = NULL; |
if (!fobj) { |
BUG_ON(old->shared_count >= old->shared_max); |
reservation_object_add_shared_inplace(obj, old, fence); |
} else |
reservation_object_add_shared_replace(obj, old, fobj, fence); |
} |
EXPORT_SYMBOL(reservation_object_add_shared_fence); |
void reservation_object_add_excl_fence(struct reservation_object *obj, |
struct fence *fence) |
{ |
struct fence *old_fence = reservation_object_get_excl(obj); |
struct reservation_object_list *old; |
u32 i = 0; |
old = reservation_object_get_list(obj); |
if (old) |
i = old->shared_count; |
if (fence) |
fence_get(fence); |
// preempt_disable(); |
write_seqcount_begin(&obj->seq); |
/* write_seqcount_begin provides the necessary memory barrier */ |
RCU_INIT_POINTER(obj->fence_excl, fence); |
if (old) |
old->shared_count = 0; |
write_seqcount_end(&obj->seq); |
// preempt_enable(); |
/* inplace update, no shared fences */ |
while (i--) |
fence_put(rcu_dereference_protected(old->shared[i], |
reservation_object_held(obj))); |
if (old_fence) |
fence_put(old_fence); |
} |
EXPORT_SYMBOL(reservation_object_add_excl_fence); |
void |
fence_init(struct fence *fence, const struct fence_ops *ops, |
spinlock_t *lock, unsigned context, unsigned seqno) |
{ |
BUG_ON(!lock); |
BUG_ON(!ops || !ops->wait || !ops->enable_signaling || |
!ops->get_driver_name || !ops->get_timeline_name); |
kref_init(&fence->refcount); |
fence->ops = ops; |
INIT_LIST_HEAD(&fence->cb_list); |
fence->lock = lock; |
fence->context = context; |
fence->seqno = seqno; |
fence->flags = 0UL; |
// trace_fence_init(fence); |
} |
EXPORT_SYMBOL(fence_init); |
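fence_init() enforces the four mandatory callbacks via the BUG_ON above. A minimal ops skeleton that satisfies it (illustrative stubs, not driver code; fence_default_wait is the upstream helper for the common wait path and is an assumption about this port):

static const char *stub_get_driver_name(struct fence *f)
{
    return "stub";
}

static const char *stub_get_timeline_name(struct fence *f)
{
    return "stub-timeline";
}

static bool stub_enable_signaling(struct fence *f)
{
    return true;    /* signaling is delivered later via fence_signal() */
}

static signed long stub_wait(struct fence *f, bool intr, signed long timeout)
{
    return fence_default_wait(f, intr, timeout);
}

static const struct fence_ops stub_fence_ops = {
    .get_driver_name   = stub_get_driver_name,
    .get_timeline_name = stub_get_timeline_name,
    .enable_signaling  = stub_enable_signaling,
    .wait              = stub_wait,
};

/* fence_init(&f->base, &stub_fence_ops, &f->lock, context, seqno); */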
#include <linux/rcupdate.h> |
struct rcu_ctrlblk { |
struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */ |
struct rcu_head **donetail; /* ->next pointer of last "done" CB. */ |
struct rcu_head **curtail; /* ->next pointer of last CB. */ |
// RCU_TRACE(long qlen); /* Number of pending CBs. */ |
// RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */ |
// RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */ |
// RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */ |
// RCU_TRACE(const char *name); /* Name of RCU type. */ |
}; |
/* Definition for rcupdate control block. */ |
static struct rcu_ctrlblk rcu_sched_ctrlblk = { |
.donetail = &rcu_sched_ctrlblk.rcucblist, |
.curtail = &rcu_sched_ctrlblk.rcucblist, |
// RCU_TRACE(.name = "rcu_sched") |
}; |
static void __call_rcu(struct rcu_head *head, |
void (*func)(struct rcu_head *rcu), |
struct rcu_ctrlblk *rcp) |
{ |
unsigned long flags; |
// debug_rcu_head_queue(head); |
head->func = func; |
head->next = NULL; |
local_irq_save(flags); |
*rcp->curtail = head; |
rcp->curtail = &head->next; |
// RCU_TRACE(rcp->qlen++); |
local_irq_restore(flags); |
} |
/* |
* Post an RCU callback to be invoked after the end of an RCU-sched grace |
* period. But since we have but one CPU, that would be after any |
* quiescent state. |
*/ |
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
{ |
__call_rcu(head, func, &rcu_sched_ctrlblk); |
} |
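This is the path that kfree_rcu() in fence_free() ultimately takes on this single-CPU build: every callback is simply appended to the one rcu_sched list above. Posting a callback by hand looks like this (struct foo and its helpers are hypothetical):

struct foo {
    int payload;
    struct rcu_head rcu;
};

static void foo_free_cb(struct rcu_head *head)
{
    struct foo *f = container_of(head, struct foo, rcu);
    kfree(f);
}

static void foo_release(struct foo *f)
{
    /* defer the free until after the current grace period */
    call_rcu_sched(&f->rcu, foo_free_cb);
}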
/drivers/video/drm/radeon/uvd_v1_0.c |
---|
70,6 → 70,82 |
} |
/** |
 * uvd_v1_0_fence_emit - emit a fence & trap command
* |
* @rdev: radeon_device pointer |
* @fence: fence to emit |
* |
* Write a fence and a trap command to the ring. |
*/ |
void uvd_v1_0_fence_emit(struct radeon_device *rdev, |
struct radeon_fence *fence) |
{ |
struct radeon_ring *ring = &rdev->ring[fence->ring]; |
uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); |
radeon_ring_write(ring, addr & 0xffffffff); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); |
radeon_ring_write(ring, fence->seq); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0)); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0)); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0)); |
radeon_ring_write(ring, 2); |
return; |
} |
/** |
* uvd_v1_0_resume - memory controller programming |
* |
* @rdev: radeon_device pointer |
* |
 * Let the UVD memory controller know its offsets
*/ |
int uvd_v1_0_resume(struct radeon_device *rdev) |
{ |
uint64_t addr; |
uint32_t size; |
int r; |
r = radeon_uvd_resume(rdev); |
if (r) |
return r; |
/* program the VCPU memory controller bits 0-27 */
addr = (rdev->uvd.gpu_addr >> 3) + 16; |
size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3; |
WREG32(UVD_VCPU_CACHE_OFFSET0, addr); |
WREG32(UVD_VCPU_CACHE_SIZE0, size); |
addr += size; |
size = RADEON_UVD_STACK_SIZE >> 3; |
WREG32(UVD_VCPU_CACHE_OFFSET1, addr); |
WREG32(UVD_VCPU_CACHE_SIZE1, size); |
addr += size; |
size = RADEON_UVD_HEAP_SIZE >> 3; |
WREG32(UVD_VCPU_CACHE_OFFSET2, addr); |
WREG32(UVD_VCPU_CACHE_SIZE2, size); |
/* bits 28-31 */ |
addr = (rdev->uvd.gpu_addr >> 28) & 0xF; |
WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); |
/* bits 32-39 */ |
addr = (rdev->uvd.gpu_addr >> 32) & 0xFF; |
WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); |
WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr)); |
return 0; |
} |
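Worth noting: the VCPU cache OFFSET/SIZE registers count in 8-byte units (hence the >>3 throughout), and the regions are packed back to back, firmware image first, then RADEON_UVD_STACK_SIZE of stack, then RADEON_UVD_HEAP_SIZE of heap, with the upper address bits carried separately in UVD_LMI_ADDR_EXT and UVD_LMI_EXT40_ADDR.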
/** |
* uvd_v1_0_init - start and test UVD block |
* |
* @rdev: radeon_device pointer |
130,8 → 206,32 |
/* lower clocks again */ |
radeon_set_uvd_clocks(rdev, 0, 0); |
if (!r) |
if (!r) { |
switch (rdev->family) { |
case CHIP_RV610: |
case CHIP_RV630: |
case CHIP_RV620: |
/* 64byte granularity workaround */ |
WREG32(MC_CONFIG, 0); |
WREG32(MC_CONFIG, 1 << 4); |
WREG32(RS_DQ_RD_RET_CONF, 0x3f); |
WREG32(MC_CONFIG, 0x1f); |
/* fall through */ |
case CHIP_RV670: |
case CHIP_RV635: |
/* write clean workaround */ |
WREG32_P(UVD_VCPU_CNTL, 0x10, ~0x10); |
break; |
default: |
/* TODO: Do we need more? */ |
break; |
} |
DRM_INFO("UVD initialized successfully.\n"); |
} |
return r; |
} |
218,12 → 318,12 |
/* enable UMC */ |
WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); |
WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); |
/* boot up the VCPU */ |
WREG32(UVD_SOFT_RESET, 0); |
mdelay(10); |
WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); |
for (i = 0; i < 10; ++i) { |
uint32_t status; |
for (j = 0; j < 100; ++j) { |
/drivers/video/drm/radeon/uvd_v2_2.c |
---|
72,6 → 72,10 |
uint32_t chip_id, size; |
int r; |
/* RV770 uses V1.0 MC */ |
if (rdev->family == CHIP_RV770) |
return uvd_v1_0_resume(rdev); |
r = radeon_uvd_resume(rdev); |
if (r) |
return r; |