/drivers/video/drm/radeon/Makefile |
---|
3,8 → 3,9 |
AS = as |
FASM = fasm.exe |
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6 |
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE |
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_X86_CMPXCHG64 -DCONFIG_TINY_RCU |
DEFINES += -DCONFIG_X86_L1_CACHE_SHIFT=6 |
DEFINES += -DCONFIG_DRM_FBDEV_EMULATION -DCONFIG_DMI -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE |
DRV_TOPDIR = $(CURDIR)/../../.. |
DRM_TOPDIR = $(CURDIR)/.. |
50,12 → 51,15 |
../ttm/ttm_memory.c \ |
../ttm/ttm_page_alloc.c \ |
../ttm/ttm_tt.c \ |
$(DRM_TOPDIR)/drm_atomic.c \ |
$(DRM_TOPDIR)/drm_atomic_helper.c \ |
$(DRM_TOPDIR)/drm_bridge.c \ |
$(DRM_TOPDIR)/drm_cache.c \ |
$(DRM_TOPDIR)/drm_crtc.c \ |
$(DRM_TOPDIR)/drm_crtc_helper.c \ |
$(DRM_TOPDIR)/drm_dp_helper.c \ |
$(DRM_TOPDIR)/drm_dp_mst_topology.c \ |
$(DRM_TOPDIR)/drm_drv.c \ |
$(DRM_TOPDIR)/drm_atomic.c \ |
$(DRM_TOPDIR)/drm_edid.c \ |
$(DRM_TOPDIR)/drm_fb_helper.c \ |
$(DRM_TOPDIR)/drm_gem.c \ |
102,6 → 106,7 |
radeon_agp.c \ |
radeon_asic.c \ |
radeon_atombios.c \ |
radeon_audio.c \ |
radeon_benchmark.c \ |
radeon_bios.c \ |
radeon_combios.c \ |
109,6 → 114,8 |
radeon_cs.c \ |
radeon_clocks.c \ |
radeon_display.c \ |
radeon_dp_auxch.c \ |
radeon_dp_mst.c \ |
radeon_encoders.c \ |
radeon_fence.c \ |
radeon_fb.c \ |
117,6 → 124,7 |
radeon_i2c.c \ |
radeon_ib.c \ |
radeon_irq_kms.c \ |
radeon_kms.c \ |
radeon_legacy_crtc.c \ |
radeon_legacy_encoders.c \ |
radeon_legacy_tv.c \ |
/drivers/video/drm/radeon/atom.c |
---|
665,6 → 665,8 |
SDEBUG(" count: %d\n", count); |
if (arg == ATOM_UNIT_MICROSEC) |
udelay(count); |
else if (!drm_can_sleep()) |
mdelay(count); |
else |
msleep(count); |
} |
/drivers/video/drm/radeon/atombios.h |
---|
7944,8 → 7944,8 |
typedef struct { |
AMD_ACPI_DESCRIPTION_HEADER SHeader; |
UCHAR TableUUID[16]; //0x24 |
ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture. |
ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture. |
ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure. |
ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure. |
ULONG Reserved[4]; //0x3C |
}UEFI_ACPI_VFCT; |
/drivers/video/drm/radeon/atombios_crtc.c |
---|
330,8 → 330,10 |
misc |= ATOM_COMPOSITESYNC; |
if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
misc |= ATOM_INTERLACE; |
if (mode->flags & DRM_MODE_FLAG_DBLCLK) |
misc |= ATOM_DOUBLE_CLOCK_MODE; |
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
misc |= ATOM_DOUBLE_CLOCK_MODE; |
misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; |
args.susModeMiscInfo.usAccess = cpu_to_le16(misc); |
args.ucCRTC = radeon_crtc->crtc_id; |
374,8 → 376,10 |
misc |= ATOM_COMPOSITESYNC; |
if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
misc |= ATOM_INTERLACE; |
if (mode->flags & DRM_MODE_FLAG_DBLCLK) |
misc |= ATOM_DOUBLE_CLOCK_MODE; |
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
misc |= ATOM_DOUBLE_CLOCK_MODE; |
misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2; |
args.susModeMiscInfo.usAccess = cpu_to_le16(misc); |
args.ucCRTC = radeon_crtc->crtc_id; |
606,6 → 610,13 |
} |
} |
if (radeon_encoder->is_mst_encoder) { |
struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv; |
struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv; |
dp_clock = dig_connector->dp_clock; |
} |
/* use recommended ref_div for ss */ |
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
if (radeon_crtc->ss_enabled) { |
952,7 → 963,9 |
radeon_crtc->bpc = 8; |
radeon_crtc->ss_enabled = false; |
if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || |
if (radeon_encoder->is_mst_encoder) { |
radeon_dp_mst_prepare_pll(crtc, mode); |
} else if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || |
(radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) { |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
struct drm_connector *connector = |
1405,6 → 1418,9 |
(x << 16) | y); |
viewport_w = crtc->mode.hdisplay; |
viewport_h = (crtc->mode.vdisplay + 1) & ~1; |
if ((rdev->family >= CHIP_BONAIRE) && |
(crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)) |
viewport_h *= 2; |
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, |
(viewport_w << 16) | viewport_h); |
1851,10 → 1867,9 |
return pll; |
} |
/* otherwise, pick one of the plls */ |
if ((rdev->family == CHIP_KAVERI) || |
(rdev->family == CHIP_KABINI) || |
if ((rdev->family == CHIP_KABINI) || |
(rdev->family == CHIP_MULLINS)) { |
/* KB/KV/ML has PPLL1 and PPLL2 */ |
/* KB/ML has PPLL1 and PPLL2 */ |
pll_in_use = radeon_get_pll_use_mask(crtc); |
if (!(pll_in_use & (1 << ATOM_PPLL2))) |
return ATOM_PPLL2; |
1863,7 → 1878,7 |
DRM_ERROR("unable to allocate a PPLL\n"); |
return ATOM_PPLL_INVALID; |
} else { |
/* CI has PPLL0, PPLL1, and PPLL2 */ |
/* CI/KV has PPLL0, PPLL1, and PPLL2 */ |
pll_in_use = radeon_get_pll_use_mask(crtc); |
if (!(pll_in_use & (1 << ATOM_PPLL2))) |
return ATOM_PPLL2; |
2067,6 → 2082,12 |
radeon_crtc->connector = NULL; |
return false; |
} |
if (radeon_crtc->encoder) { |
struct radeon_encoder *radeon_encoder = |
to_radeon_encoder(radeon_crtc->encoder); |
radeon_crtc->output_csc = radeon_encoder->output_csc; |
} |
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) |
return false; |
if (!atombios_crtc_prepare_pll(crtc, adjusted_mode)) |
2155,6 → 2176,7 |
case ATOM_PPLL0: |
/* disable the ppll */ |
if ((rdev->family == CHIP_ARUBA) || |
(rdev->family == CHIP_KAVERI) || |
(rdev->family == CHIP_BONAIRE) || |
(rdev->family == CHIP_HAWAII)) |
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, |
/drivers/video/drm/radeon/atombios_dp.c |
---|
158,7 → 158,7 |
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1) |
static ssize_t |
radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) |
radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) |
{ |
struct radeon_i2c_chan *chan = |
container_of(aux, struct radeon_i2c_chan, aux); |
171,13 → 171,22 |
return -E2BIG; |
tx_buf[0] = msg->address & 0xff; |
tx_buf[1] = msg->address >> 8; |
tx_buf[2] = msg->request << 4; |
tx_buf[1] = (msg->address >> 8) & 0xff; |
tx_buf[2] = (msg->request << 4) | |
((msg->address >> 16) & 0xf); |
tx_buf[3] = msg->size ? (msg->size - 1) : 0; |
switch (msg->request & ~DP_AUX_I2C_MOT) { |
case DP_AUX_NATIVE_WRITE: |
case DP_AUX_I2C_WRITE: |
case DP_AUX_I2C_WRITE_STATUS_UPDATE: |
/* The atom implementation only supports writes with a max payload of |
* 12 bytes since it uses 4 bits for the total count (header + payload) |
* in the parameter space. The atom interface supports 16 byte |
* payloads for reads. The hw itself supports up to 16 bytes of payload. |
*/ |
if (WARN_ON_ONCE(msg->size > 12)) |
return -E2BIG; |
/* tx_size needs to be 4 even for bare address packets since the atom |
* table needs the info in tx_buf[3]. |
*/ |
219,11 → 228,20 |
void radeon_dp_aux_init(struct radeon_connector *radeon_connector) |
{ |
struct drm_device *dev = radeon_connector->base.dev; |
struct radeon_device *rdev = dev->dev_private; |
int ret; |
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd; |
radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev; |
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer; |
if (ASIC_IS_DCE5(rdev)) { |
if (radeon_auxch) |
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_native; |
else |
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; |
} else { |
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_atom; |
} |
ret = drm_dp_aux_register(&radeon_connector->ddc_bus->aux); |
if (!ret) |
237,7 → 255,7 |
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3 |
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3 |
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], |
static void dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE], |
int lane_count, |
u8 train_set[4]) |
{ |
294,8 → 312,8 |
/***** radeon specific DP functions *****/ |
static int radeon_dp_get_max_link_rate(struct drm_connector *connector, |
u8 dpcd[DP_DPCD_SIZE]) |
int radeon_dp_get_max_link_rate(struct drm_connector *connector, |
const u8 dpcd[DP_DPCD_SIZE]) |
{ |
int max_link_rate; |
312,7 → 330,7 |
* if the max lane# < low rate lane# then use max lane# instead. |
*/ |
static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, |
u8 dpcd[DP_DPCD_SIZE], |
const u8 dpcd[DP_DPCD_SIZE], |
int pix_clock) |
{ |
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); |
331,7 → 349,7 |
} |
static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, |
u8 dpcd[DP_DPCD_SIZE], |
const u8 dpcd[DP_DPCD_SIZE], |
int pix_clock) |
{ |
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); |
405,11 → 423,12 |
{ |
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; |
u8 msg[DP_DPCD_SIZE]; |
int ret; |
int ret, i; |
for (i = 0; i < 7; i++) { |
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, |
DP_DPCD_SIZE); |
if (ret > 0) { |
if (ret == DP_DPCD_SIZE) { |
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); |
DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), |
419,6 → 438,7 |
return true; |
} |
} |
dig_connector->dpcd[0] = 0; |
return false; |
} |
492,6 → 512,10 |
struct radeon_connector_atom_dig *dig_connector; |
int dp_clock; |
if ((mode->clock > 340000) && |
(!radeon_connector_is_dp12_capable(connector))) |
return MODE_CLOCK_HIGH; |
if (!radeon_connector->con_priv) |
return MODE_CLOCK_HIGH; |
dig_connector = radeon_connector->con_priv; |
619,10 → 643,8 |
drm_dp_dpcd_writeb(dp_info->aux, |
DP_DOWNSPREAD_CTRL, 0); |
if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) && |
(dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { |
if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE) |
drm_dp_dpcd_writeb(dp_info->aux, DP_EDP_CONFIGURATION_SET, 1); |
} |
/* set the lane count on the sink */ |
tmp = dp_info->dp_lane_count; |
/drivers/video/drm/radeon/atombios_encoders.c |
---|
27,6 → 27,7 |
#include <drm/drm_crtc_helper.h> |
#include <drm/radeon_drm.h> |
#include "radeon.h" |
#include "radeon_audio.h" |
#include "atom.h" |
#include <linux/backlight.h> |
236,6 → 237,7 |
backlight_update_status(bd); |
DRM_INFO("radeon atom DIG backlight initialized\n"); |
rdev->mode_info.bl_encoder = radeon_encoder; |
return; |
309,7 → 311,7 |
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; |
/* get the native mode for scaling */ |
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT|ATOM_DEVICE_DFP_SUPPORT)) { |
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { |
radeon_panel_mode_fixup(encoder, adjusted_mode); |
} else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { |
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; |
664,11 → 666,21 |
int |
atombios_get_encoder_mode(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector; |
struct radeon_connector_atom_dig *dig_connector; |
struct radeon_encoder_atom_dig *dig_enc; |
if (radeon_encoder_is_digital(encoder)) { |
dig_enc = radeon_encoder->enc_priv; |
if (dig_enc->active_mst_links) |
return ATOM_ENCODER_MODE_DP_MST; |
} |
if (radeon_encoder->is_mst_encoder || radeon_encoder->offset) |
return ATOM_ENCODER_MODE_DP_MST; |
/* dp bridges are always DP */ |
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) |
return ATOM_ENCODER_MODE_DP; |
728,6 → 740,10 |
dig_connector = radeon_connector->con_priv; |
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { |
if (radeon_audio != 0 && |
drm_detect_monitor_audio(radeon_connector_edid(connector)) && |
ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) |
return ATOM_ENCODER_MODE_DP_AUDIO; |
return ATOM_ENCODER_MODE_DP; |
} else if (radeon_audio != 0) { |
if (radeon_connector->audio == RADEON_AUDIO_ENABLE) |
742,6 → 758,10 |
} |
break; |
case DRM_MODE_CONNECTOR_eDP: |
if (radeon_audio != 0 && |
drm_detect_monitor_audio(radeon_connector_edid(connector)) && |
ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) |
return ATOM_ENCODER_MODE_DP_AUDIO; |
return ATOM_ENCODER_MODE_DP; |
case DRM_MODE_CONNECTOR_DVIA: |
case DRM_MODE_CONNECTOR_VGA: |
812,7 → 832,7 |
}; |
void |
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) |
atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
909,6 → 929,9 |
if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000)) |
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; |
if (enc_override != -1) |
args.v3.acConfig.ucDigSel = enc_override; |
else |
args.v3.acConfig.ucDigSel = dig->dig_encoder; |
args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder); |
break; |
937,6 → 960,10 |
else |
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ; |
} |
if (enc_override != -1) |
args.v4.acConfig.ucDigSel = enc_override; |
else |
args.v4.acConfig.ucDigSel = dig->dig_encoder; |
args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder); |
if (hpd_id == RADEON_HPD_NONE) |
958,6 → 985,12 |
} |
void |
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) |
{ |
atombios_dig_encoder_setup2(encoder, action, panel_mode, -1); |
} |
union dig_transmitter_control { |
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; |
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; |
967,7 → 1000,7 |
}; |
void |
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) |
atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set, int fe) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
1317,7 → 1350,7 |
args.v5.asConfig.ucHPDSel = 0; |
else |
args.v5.asConfig.ucHPDSel = hpd_id + 1; |
args.v5.ucDigEncoderSel = 1 << dig_encoder; |
args.v5.ucDigEncoderSel = (fe != -1) ? (1 << fe) : (1 << dig_encoder); |
args.v5.ucDPLaneSet = lane_set; |
break; |
default: |
1333,6 → 1366,12 |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
void |
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) |
{ |
atombios_dig_transmitter_setup2(encoder, action, lane_num, lane_set, -1); |
} |
bool |
atombios_set_edp_panel_power(struct drm_connector *connector, int action) |
{ |
1586,9 → 1625,15 |
} else |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
if (rdev->mode_info.bl_encoder) { |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
atombios_set_backlight_level(radeon_encoder, dig->backlight_level); |
} else { |
args.ucAction = ATOM_LCD_BLON; |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
} |
break; |
case DRM_MODE_DPMS_STANDBY: |
case DRM_MODE_DPMS_SUSPEND: |
1667,9 → 1712,13 |
if (ASIC_IS_DCE4(rdev)) |
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); |
} |
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
if (rdev->mode_info.bl_encoder) |
atombios_set_backlight_level(radeon_encoder, dig->backlight_level); |
else |
atombios_dig_transmitter_setup(encoder, |
ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); |
} |
if (ext_encoder) |
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); |
break; |
1676,6 → 1725,11 |
case DRM_MODE_DPMS_STANDBY: |
case DRM_MODE_DPMS_SUSPEND: |
case DRM_MODE_DPMS_OFF: |
/* don't power off encoders with active MST links */ |
if (dig->active_mst_links) |
return; |
if (ASIC_IS_DCE4(rdev)) { |
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) |
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); |
1718,10 → 1772,17 |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
int encoder_mode = atombios_get_encoder_mode(encoder); |
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", |
radeon_encoder->encoder_id, mode, radeon_encoder->devices, |
radeon_encoder->active_device); |
if ((radeon_audio != 0) && |
((encoder_mode == ATOM_ENCODER_MODE_HDMI) || |
ENCODER_MODE_IS_DP(encoder_mode))) |
radeon_audio_dpms(encoder, mode); |
switch (radeon_encoder->encoder_id) { |
case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
1935,6 → 1996,53 |
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
} |
void |
atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, int fe) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); |
uint8_t frev, crev; |
union crtc_source_param args; |
memset(&args, 0, sizeof(args)); |
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) |
return; |
if (frev != 1 && crev != 2) |
DRM_ERROR("Unknown table for MST %d, %d\n", frev, crev); |
args.v2.ucCRTC = radeon_crtc->crtc_id; |
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_DP_MST; |
switch (fe) { |
case 0: |
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; |
break; |
case 1: |
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; |
break; |
case 2: |
args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; |
break; |
case 3: |
args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; |
break; |
case 4: |
args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; |
break; |
case 5: |
args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; |
break; |
case 6: |
args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID; |
break; |
} |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
static void |
atombios_apply_encoder_quirks(struct drm_encoder *encoder, |
struct drm_display_mode *mode) |
1983,8 → 2091,15 |
} |
} |
static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) |
void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx) |
{ |
if (enc_idx < 0) |
return; |
rdev->mode_info.active_encoders &= ~(1 << enc_idx); |
} |
int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
1992,32 → 2107,38 |
struct drm_encoder *test_encoder; |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
uint32_t dig_enc_in_use = 0; |
int enc_idx = -1; |
if (fe_idx >= 0) { |
enc_idx = fe_idx; |
goto assigned; |
} |
if (ASIC_IS_DCE6(rdev)) { |
/* DCE6 */ |
switch (radeon_encoder->encoder_id) { |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
if (dig->linkb) |
return 1; |
enc_idx = 1; |
else |
return 0; |
enc_idx = 0; |
break; |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
if (dig->linkb) |
return 3; |
enc_idx = 3; |
else |
return 2; |
enc_idx = 2; |
break; |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
if (dig->linkb) |
return 5; |
enc_idx = 5; |
else |
return 4; |
enc_idx = 4; |
break; |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
return 6; |
enc_idx = 6; |
break; |
} |
goto assigned; |
} else if (ASIC_IS_DCE4(rdev)) { |
/* DCE4/5 */ |
if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) { |
2024,39 → 2145,41 |
/* ontario follows DCE4 */ |
if (rdev->family == CHIP_PALM) { |
if (dig->linkb) |
return 1; |
enc_idx = 1; |
else |
return 0; |
enc_idx = 0; |
} else |
/* llano follows DCE3.2 */ |
return radeon_crtc->crtc_id; |
enc_idx = radeon_crtc->crtc_id; |
} else { |
switch (radeon_encoder->encoder_id) { |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
if (dig->linkb) |
return 1; |
enc_idx = 1; |
else |
return 0; |
enc_idx = 0; |
break; |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
if (dig->linkb) |
return 3; |
enc_idx = 3; |
else |
return 2; |
enc_idx = 2; |
break; |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
if (dig->linkb) |
return 5; |
enc_idx = 5; |
else |
return 4; |
enc_idx = 4; |
break; |
} |
} |
goto assigned; |
} |
/* on DCE32 and encoder can driver any block so just crtc id */ |
if (ASIC_IS_DCE32(rdev)) { |
return radeon_crtc->crtc_id; |
enc_idx = radeon_crtc->crtc_id; |
goto assigned; |
} |
/* on DCE3 - LVTMA can only be driven by DIGB */ |
2084,7 → 2207,18 |
if (!(dig_enc_in_use & 1)) |
return 0; |
return 1; |
assigned: |
if (enc_idx == -1) { |
DRM_ERROR("Got encoder index incorrect - returning 0\n"); |
return 0; |
} |
if (rdev->mode_info.active_encoders & (1 << enc_idx)) { |
DRM_ERROR("chosen encoder in use %d\n", enc_idx); |
} |
rdev->mode_info.active_encoders |= (1 << enc_idx); |
return enc_idx; |
} |
/* This only needs to be called once at startup */ |
void |
2123,6 → 2257,8 |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
int encoder_mode; |
radeon_encoder->pixel_clock = adjusted_mode->clock; |
2171,13 → 2307,12 |
atombios_apply_encoder_quirks(encoder, adjusted_mode); |
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { |
if (rdev->asic->display.hdmi_enable) |
radeon_hdmi_enable(rdev, encoder, true); |
if (rdev->asic->display.hdmi_setmode) |
radeon_hdmi_setmode(rdev, encoder, adjusted_mode); |
encoder_mode = atombios_get_encoder_mode(encoder); |
if (connector && (radeon_audio != 0) && |
((encoder_mode == ATOM_ENCODER_MODE_HDMI) || |
ENCODER_MODE_IS_DP(encoder_mode))) |
radeon_audio_mode_set(encoder, adjusted_mode); |
} |
} |
static bool |
atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector) |
2340,7 → 2475,9 |
ENCODER_OBJECT_ID_NONE)) { |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
if (dig) { |
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder); |
if (dig->dig_encoder >= 0) |
radeon_atom_release_dig_encoder(rdev, dig->dig_encoder); |
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder, -1); |
if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) { |
if (rdev->family >= CHIP_R600) |
dig->afmt = rdev->mode_info.afmt[dig->dig_encoder]; |
2446,9 → 2583,13 |
if (rdev->asic->display.hdmi_enable) |
radeon_hdmi_enable(rdev, encoder, false); |
} |
if (atombios_get_encoder_mode(encoder) != ATOM_ENCODER_MODE_DP_MST) { |
dig = radeon_encoder->enc_priv; |
radeon_atom_release_dig_encoder(rdev, dig->dig_encoder); |
dig->dig_encoder = -1; |
radeon_encoder->active_device = 0; |
} |
} else |
radeon_encoder->active_device = 0; |
} |
/drivers/video/drm/radeon/btc_dpm.c |
---|
2277,6 → 2277,7 |
eg_pi->requested_rps.ps_priv = &eg_pi->requested_ps; |
} |
#if 0 |
void btc_dpm_reset_asic(struct radeon_device *rdev) |
{ |
rv770_restrict_performance_levels_before_switch(rdev); |
2284,6 → 2285,7 |
btc_set_boot_state_timing(rdev); |
rv770_set_boot_state(rdev); |
} |
#endif |
int btc_dpm_pre_set_power_state(struct radeon_device *rdev) |
{ |
2749,15 → 2751,56 |
else /* current_index == 2 */ |
pl = &ps->high; |
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); |
if (rdev->family >= CHIP_CEDAR) { |
seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", |
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci); |
} |
} |
u32 btc_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct radeon_ps *rps = &eg_pi->current_rps; |
struct rv7xx_ps *ps = rv770_get_ps(rps); |
struct rv7xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> |
CURRENT_PROFILE_INDEX_SHIFT; |
if (current_index > 2) { |
return 0; |
} else { |
seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n", |
current_index, pl->sclk, pl->mclk, pl->vddc); |
if (current_index == 0) |
pl = &ps->low; |
else if (current_index == 1) |
pl = &ps->medium; |
else /* current_index == 2 */ |
pl = &ps->high; |
return pl->sclk; |
} |
} |
u32 btc_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct radeon_ps *rps = &eg_pi->current_rps; |
struct rv7xx_ps *ps = rv770_get_ps(rps); |
struct rv7xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> |
CURRENT_PROFILE_INDEX_SHIFT; |
if (current_index > 2) { |
return 0; |
} else { |
if (current_index == 0) |
pl = &ps->low; |
else if (current_index == 1) |
pl = &ps->medium; |
else /* current_index == 2 */ |
pl = &ps->high; |
return pl->mclk; |
} |
} |
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low) |
{ |
/drivers/video/drm/radeon/cayman_blit_shaders.c |
---|
32,7 → 32,7 |
* evergreen cards need to use the 3D engine to blit data which requires |
* quite a bit of hw state setup. Rather than pull the whole 3D driver |
* (which normally generates the 3D state) into the DRM, we opt to use |
* statically generated state tables. The regsiter state and shaders |
* statically generated state tables. The register state and shaders |
* were hand generated to support blitting functionality. See the 3D |
* driver or documentation for descriptions of the registers and |
* shader instructions. |
/drivers/video/drm/radeon/ci_dpm.c |
---|
187,6 → 187,9 |
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev, |
PPSMC_Msg msg, u32 parameter); |
static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev); |
static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev); |
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = rdev->pm.dpm.priv; |
1043,22 → 1046,24 |
return -EINVAL; |
} |
pi->fan_is_controlled_by_smc = true; |
return 0; |
} |
#if 0 |
static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev) |
{ |
PPSMC_Result ret; |
struct ci_power_info *pi = ci_get_pi(rdev); |
ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl); |
if (ret == PPSMC_Result_OK) |
if (ret == PPSMC_Result_OK) { |
pi->fan_is_controlled_by_smc = false; |
return 0; |
else |
} else |
return -EINVAL; |
} |
static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, |
int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, |
u32 *speed) |
{ |
u32 duty, duty100; |
1083,22 → 1088,23 |
return 0; |
} |
static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, |
int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, |
u32 speed) |
{ |
u32 tmp; |
u32 duty, duty100; |
u64 tmp64; |
struct ci_power_info *pi = ci_get_pi(rdev); |
if (rdev->pm.no_fan) |
return -ENOENT; |
if (pi->fan_is_controlled_by_smc) |
return -EINVAL; |
if (speed > 100) |
return -EINVAL; |
if (rdev->pm.dpm.fan.ucode_fan_control) |
ci_fan_ctrl_stop_smc_fan_control(rdev); |
duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; |
if (duty100 == 0) |
1112,11 → 1118,38 |
tmp |= FDO_STATIC_DUTY(duty); |
WREG32_SMC(CG_FDO_CTRL0, tmp); |
ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); |
return 0; |
} |
void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode) |
{ |
if (mode) { |
/* stop auto-manage */ |
if (rdev->pm.dpm.fan.ucode_fan_control) |
ci_fan_ctrl_stop_smc_fan_control(rdev); |
ci_fan_ctrl_set_static_mode(rdev, mode); |
} else { |
/* restart auto-manage */ |
if (rdev->pm.dpm.fan.ucode_fan_control) |
ci_thermal_start_smc_fan_control(rdev); |
else |
ci_fan_ctrl_set_default_mode(rdev); |
} |
} |
u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
u32 tmp; |
if (pi->fan_is_controlled_by_smc) |
return 0; |
tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; |
return (tmp >> FDO_PWM_MODE_SHIFT); |
} |
#if 0 |
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev, |
u32 *speed) |
{ |
1698,10 → 1731,12 |
return 0; |
} |
#if 0 |
static int ci_set_boot_state(struct radeon_device *rdev) |
{ |
return ci_enable_sclk_mclk_dpm(rdev, false); |
} |
#endif |
static u32 ci_get_average_sclk_freq(struct radeon_device *rdev) |
{ |
5343,10 → 5378,12 |
return 0; |
} |
#if 0 |
void ci_dpm_reset_asic(struct radeon_device *rdev) |
{ |
ci_set_boot_state(rdev); |
} |
#endif |
void ci_dpm_display_configuration_changed(struct radeon_device *rdev) |
{ |
5781,7 → 5818,7 |
tmp |= DPM_ENABLED; |
break; |
default: |
DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift); |
DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift); |
break; |
} |
WREG32_SMC(CNB_PWRMGT_CNTL, tmp); |
5885,6 → 5922,20 |
r600_dpm_print_ps_status(rdev, rps); |
} |
u32 ci_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
u32 sclk = ci_get_average_sclk_freq(rdev); |
return sclk; |
} |
u32 ci_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
u32 mclk = ci_get_average_mclk_freq(rdev); |
return mclk; |
} |
u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low) |
{ |
struct ci_power_info *pi = ci_get_pi(rdev); |
/drivers/video/drm/radeon/ci_dpm.h |
---|
291,6 → 291,7 |
struct ci_ps requested_ps; |
/* fan control */ |
bool fan_ctrl_is_in_default_mode; |
bool fan_is_controlled_by_smc; |
u32 t_min; |
u32 fan_ctrl_default_mode; |
}; |
/drivers/video/drm/radeon/ci_smc.c |
---|
184,6 → 184,7 |
return (PPSMC_Result)tmp; |
} |
#if 0 |
PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev) |
{ |
u32 tmp; |
201,6 → 202,7 |
return PPSMC_Result_OK; |
} |
#endif |
int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit) |
{ |
/drivers/video/drm/radeon/cik.c |
---|
27,6 → 27,7 |
#include "drmP.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include "cikd.h" |
#include "atom.h" |
#include "cik_blit_shaders.h" |
140,6 → 141,64 |
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev, |
bool enable); |
/** |
* cik_get_allowed_info_register - fetch the register for the info ioctl |
* |
* @rdev: radeon_device pointer |
* @reg: register offset in bytes |
* @val: register value |
* |
* Returns 0 for success or -EINVAL for an invalid register |
* |
*/ |
int cik_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val) |
{ |
switch (reg) { |
case GRBM_STATUS: |
case GRBM_STATUS2: |
case GRBM_STATUS_SE0: |
case GRBM_STATUS_SE1: |
case GRBM_STATUS_SE2: |
case GRBM_STATUS_SE3: |
case SRBM_STATUS: |
case SRBM_STATUS2: |
case (SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET): |
case (SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET): |
case UVD_STATUS: |
/* TODO VCE */ |
*val = RREG32(reg); |
return 0; |
default: |
return -EINVAL; |
} |
} |
/* |
* Indirect registers accessor |
*/ |
u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->didt_idx_lock, flags); |
WREG32(CIK_DIDT_IND_INDEX, (reg)); |
r = RREG32(CIK_DIDT_IND_DATA); |
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); |
return r; |
} |
void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->didt_idx_lock, flags); |
WREG32(CIK_DIDT_IND_INDEX, (reg)); |
WREG32(CIK_DIDT_IND_DATA, (v)); |
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); |
} |
/* get temperature in millidegrees */ |
int ci_get_temp(struct radeon_device *rdev) |
{ |
3612,6 → 3671,8 |
} |
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
WREG32(SRBM_INT_CNTL, 0x1); |
WREG32(SRBM_INT_ACK, 0x1); |
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); |
3904,7 → 3965,9 |
struct radeon_ring *ring = &rdev->ring[fence->ring]; |
u64 addr = rdev->fence_drv[fence->ring].gpu_addr; |
/* EVENT_WRITE_EOP - flush caches, send int */ |
/* Workaround for cache flush problems. First send a dummy EOP |
* event down the pipe with seq one below. |
*/ |
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); |
radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | |
EOP_TC_ACTION_EN | |
3911,6 → 3974,18 |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | |
EVENT_INDEX(5))); |
radeon_ring_write(ring, addr & 0xfffffffc); |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | |
DATA_SEL(1) | INT_SEL(0)); |
radeon_ring_write(ring, fence->seq - 1); |
radeon_ring_write(ring, 0); |
/* Then send the real EOP event down the pipe. */ |
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); |
radeon_ring_write(ring, (EOP_TCL1_ACTION_EN | |
EOP_TC_ACTION_EN | |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | |
EVENT_INDEX(5))); |
radeon_ring_write(ring, addr & 0xfffffffc); |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2)); |
radeon_ring_write(ring, fence->seq); |
radeon_ring_write(ring, 0); |
4098,11 → 4173,7 |
control |= ib->length_dw | (vm_id << 24); |
radeon_ring_write(ring, header); |
radeon_ring_write(ring, |
#ifdef __BIG_ENDIAN |
(2 << 0) | |
#endif |
(ib->gpu_addr & 0xFFFFFFFC)); |
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC)); |
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); |
radeon_ring_write(ring, control); |
} |
4529,6 → 4600,31 |
WDOORBELL32(ring->doorbell_index, ring->wptr); |
} |
static void cik_compute_stop(struct radeon_device *rdev, |
struct radeon_ring *ring) |
{ |
u32 j, tmp; |
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
/* Disable wptr polling. */ |
tmp = RREG32(CP_PQ_WPTR_POLL_CNTL); |
tmp &= ~WPTR_POLL_EN; |
WREG32(CP_PQ_WPTR_POLL_CNTL, tmp); |
/* Disable HQD. */ |
if (RREG32(CP_HQD_ACTIVE) & 1) { |
WREG32(CP_HQD_DEQUEUE_REQUEST, 1); |
for (j = 0; j < rdev->usec_timeout; j++) { |
if (!(RREG32(CP_HQD_ACTIVE) & 1)) |
break; |
udelay(1); |
} |
WREG32(CP_HQD_DEQUEUE_REQUEST, 0); |
WREG32(CP_HQD_PQ_RPTR, 0); |
WREG32(CP_HQD_PQ_WPTR, 0); |
} |
cik_srbm_select(rdev, 0, 0, 0, 0); |
} |
/** |
* cik_cp_compute_enable - enable/disable the compute CP MEs |
* |
4542,6 → 4638,15 |
if (enable) |
WREG32(CP_MEC_CNTL, 0); |
else { |
/* |
* To make hibernation reliable we need to clear compute ring |
* configuration before halting the compute ring. |
*/ |
mutex_lock(&rdev->srbm_mutex); |
cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); |
cik_compute_stop(rdev,&rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); |
mutex_unlock(&rdev->srbm_mutex); |
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); |
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; |
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; |
5707,6 → 5812,28 |
WREG32(VM_INVALIDATE_REQUEST, 0x1); |
} |
static void cik_pcie_init_compute_vmid(struct radeon_device *rdev) |
{ |
int i; |
uint32_t sh_mem_bases, sh_mem_config; |
sh_mem_bases = 0x6000 | 0x6000 << 16; |
sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED); |
sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED); |
mutex_lock(&rdev->srbm_mutex); |
for (i = 8; i < 16; i++) { |
cik_srbm_select(rdev, 0, 0, 0, i); |
/* CP and shaders */ |
WREG32(SH_MEM_CONFIG, sh_mem_config); |
WREG32(SH_MEM_APE1_BASE, 1); |
WREG32(SH_MEM_APE1_LIMIT, 0); |
WREG32(SH_MEM_BASES, sh_mem_bases); |
} |
cik_srbm_select(rdev, 0, 0, 0, 0); |
mutex_unlock(&rdev->srbm_mutex); |
} |
/** |
* cik_pcie_gart_enable - gart enable |
* |
5765,7 → 5892,7 |
/* restore context1-15 */ |
/* set vm size, must be a multiple of 4 */ |
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); |
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); |
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); |
for (i = 1; i < 16; i++) { |
if (i < 8) |
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), |
5820,6 → 5947,8 |
cik_srbm_select(rdev, 0, 0, 0, 0); |
mutex_unlock(&rdev->srbm_mutex); |
cik_pcie_init_compute_vmid(rdev); |
cik_pcie_gart_tlb_flush(rdev); |
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
(unsigned)(rdev->mc.gtt_size >> 20), |
6033,6 → 6162,17 |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 1 << vm_id); |
/* wait for the invalidate to complete */ |
radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); |
radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ |
WAIT_REG_MEM_FUNCTION(0) | /* always */ |
WAIT_REG_MEM_ENGINE(0))); /* me */ |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 0); /* ref */ |
radeon_ring_write(ring, 0); /* mask */ |
radeon_ring_write(ring, 0x20); /* poll interval */ |
/* compute doesn't have PFP */ |
if (usepfp) { |
/* sync PFP to ME, otherwise we might get invalid PFP reads */ |
7180,6 → 7320,8 |
WREG32(CP_ME2_PIPE3_INT_CNTL, 0); |
/* grbm */ |
WREG32(GRBM_INT_CNTL, 0); |
/* SRBM */ |
WREG32(SRBM_INT_CNTL, 0); |
/* vline/vblank, etc. */ |
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
7323,7 → 7465,6 |
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; |
u32 grbm_int_cntl = 0; |
u32 dma_cntl, dma_cntl1; |
u32 thermal_int; |
if (!rdev->irq.installed) { |
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
7341,12 → 7482,12 |
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE; |
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; |
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; |
7353,13 → 7494,6 |
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; |
if (rdev->flags & RADEON_IS_IGP) |
thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & |
~(THERM_INTH_MASK | THERM_INTL_MASK); |
else |
thermal_int = RREG32_SMC(CG_THERMAL_INT) & |
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); |
/* enable CP interrupts on all rings */ |
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { |
DRM_DEBUG("cik_irq_set: sw int gfx\n"); |
7440,37 → 7574,29 |
} |
if (rdev->irq.hpd[0]) { |
DRM_DEBUG("cik_irq_set: hpd 1\n"); |
hpd1 |= DC_HPDx_INT_EN; |
hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[1]) { |
DRM_DEBUG("cik_irq_set: hpd 2\n"); |
hpd2 |= DC_HPDx_INT_EN; |
hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[2]) { |
DRM_DEBUG("cik_irq_set: hpd 3\n"); |
hpd3 |= DC_HPDx_INT_EN; |
hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[3]) { |
DRM_DEBUG("cik_irq_set: hpd 4\n"); |
hpd4 |= DC_HPDx_INT_EN; |
hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[4]) { |
DRM_DEBUG("cik_irq_set: hpd 5\n"); |
hpd5 |= DC_HPDx_INT_EN; |
hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[5]) { |
DRM_DEBUG("cik_irq_set: hpd 6\n"); |
hpd6 |= DC_HPDx_INT_EN; |
hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.dpm_thermal) { |
DRM_DEBUG("dpm thermal\n"); |
if (rdev->flags & RADEON_IS_IGP) |
thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; |
else |
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; |
} |
WREG32(CP_INT_CNTL_RING0, cp_int_cntl); |
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl); |
7517,10 → 7643,8 |
WREG32(DC_HPD5_INT_CONTROL, hpd5); |
WREG32(DC_HPD6_INT_CONTROL, hpd6); |
if (rdev->flags & RADEON_IS_IGP) |
WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); |
else |
WREG32_SMC(CG_THERMAL_INT, thermal_int); |
/* posting read */ |
RREG32(SRBM_STATUS); |
return 0; |
} |
7642,7 → 7766,37 |
tmp |= DC_HPDx_INT_ACK; |
WREG32(DC_HPD6_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD1_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD1_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD2_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD2_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD3_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD3_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD4_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD4_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD5_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD5_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD6_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD6_INT_CONTROL, tmp); |
} |
} |
/** |
* cik_irq_disable - disable interrupts |
7767,6 → 7921,7 |
u8 me_id, pipe_id, queue_id; |
u32 ring_index; |
bool queue_hotplug = false; |
bool queue_dp = false; |
bool queue_reset = false; |
u32 addr, status, mc_client; |
bool queue_thermal = false; |
7805,19 → 7960,27 |
case 1: /* D1 vblank/vline */ |
switch (src_data) { |
case 0: /* D1 vblank */ |
if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[0]) { |
drm_handle_vblank(rdev->ddev, 0); |
rdev->pm.vblank_sync = true; |
wake_up(&rdev->irq.vblank_queue); |
} |
if (atomic_read(&rdev->irq.pflip[0])) |
radeon_crtc_handle_vblank(rdev, 0); |
rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D1 vblank\n"); |
} |
break; |
case 1: /* D1 vline */ |
if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D1 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
7827,19 → 7990,27 |
case 2: /* D2 vblank/vline */ |
switch (src_data) { |
case 0: /* D2 vblank */ |
if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[1]) { |
drm_handle_vblank(rdev->ddev, 1); |
rdev->pm.vblank_sync = true; |
wake_up(&rdev->irq.vblank_queue); |
} |
if (atomic_read(&rdev->irq.pflip[1])) |
radeon_crtc_handle_vblank(rdev, 1); |
rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D2 vblank\n"); |
} |
break; |
case 1: /* D2 vline */ |
if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D2 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
7849,19 → 8020,27 |
case 3: /* D3 vblank/vline */ |
switch (src_data) { |
case 0: /* D3 vblank */ |
if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[2]) { |
drm_handle_vblank(rdev->ddev, 2); |
rdev->pm.vblank_sync = true; |
wake_up(&rdev->irq.vblank_queue); |
} |
if (atomic_read(&rdev->irq.pflip[2])) |
radeon_crtc_handle_vblank(rdev, 2); |
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D3 vblank\n"); |
} |
break; |
case 1: /* D3 vline */ |
if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D3 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
7871,19 → 8050,27 |
case 4: /* D4 vblank/vline */ |
switch (src_data) { |
case 0: /* D4 vblank */ |
if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[3]) { |
drm_handle_vblank(rdev->ddev, 3); |
rdev->pm.vblank_sync = true; |
wake_up(&rdev->irq.vblank_queue); |
} |
if (atomic_read(&rdev->irq.pflip[3])) |
radeon_crtc_handle_vblank(rdev, 3); |
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D4 vblank\n"); |
} |
break; |
case 1: /* D4 vline */ |
if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D4 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
7893,19 → 8080,27 |
case 5: /* D5 vblank/vline */ |
switch (src_data) { |
case 0: /* D5 vblank */ |
if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[4]) { |
drm_handle_vblank(rdev->ddev, 4); |
rdev->pm.vblank_sync = true; |
wake_up(&rdev->irq.vblank_queue); |
} |
if (atomic_read(&rdev->irq.pflip[4])) |
radeon_crtc_handle_vblank(rdev, 4); |
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D5 vblank\n"); |
} |
break; |
case 1: /* D5 vline */ |
if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D5 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
7915,19 → 8110,27 |
case 6: /* D6 vblank/vline */ |
switch (src_data) { |
case 0: /* D6 vblank */ |
if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[5]) { |
drm_handle_vblank(rdev->ddev, 5); |
rdev->pm.vblank_sync = true; |
wake_up(&rdev->irq.vblank_queue); |
} |
if (atomic_read(&rdev->irq.pflip[5])) |
radeon_crtc_handle_vblank(rdev, 5); |
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D6 vblank\n"); |
} |
break; |
case 1: /* D6 vline */ |
if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D6 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
7945,52 → 8148,122 |
case 42: /* HPD hotplug */ |
switch (src_data) { |
case 0: |
if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD1\n"); |
} |
break; |
case 1: |
if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD2\n"); |
} |
break; |
case 2: |
if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD3\n"); |
} |
break; |
case 3: |
if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD4\n"); |
} |
break; |
case 4: |
if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD5\n"); |
} |
break; |
case 5: |
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD6\n"); |
} |
break; |
case 6: |
if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 1\n"); |
break; |
case 7: |
if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 2\n"); |
break; |
case 8: |
if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 3\n"); |
break; |
case 9: |
if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 4\n"); |
break; |
case 10: |
if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 5\n"); |
break; |
case 11: |
if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 6\n"); |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
break; |
} |
break; |
case 96: |
DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); |
WREG32(SRBM_INT_ACK, 0x1); |
break; |
case 124: /* UVD */ |
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
8319,6 → 8592,16 |
if (r) |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
r = radeon_vce_resume(rdev); |
if (!r) { |
r = vce_v2_0_resume(rdev); |
if (!r) |
r = radeon_fence_driver_start_ring(rdev, |
TN_RING_TYPE_VCE1_INDEX); |
if (!r) |
r = radeon_fence_driver_start_ring(rdev, |
TN_RING_TYPE_VCE2_INDEX); |
} |
if (r) { |
dev_err(rdev->dev, "VCE init error (%d).\n", r); |
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; |
8408,6 → 8691,24 |
if (r) |
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); |
} |
r = -ENOENT; |
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; |
if (ring->ring_size) |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
VCE_CMD_NO_OP); |
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; |
if (ring->ring_size) |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
VCE_CMD_NO_OP); |
if (!r) |
r = vce_v1_0_init(rdev); |
else if (r != -ENOENT) |
DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
8551,6 → 8852,18 |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
} |
r = radeon_vce_init(rdev); |
if (!r) { |
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
} |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
8562,6 → 8875,16 |
r = cik_startup(rdev); |
if (r) { |
dev_err(rdev->dev, "disabling GPU acceleration\n"); |
cik_cp_fini(rdev); |
cik_sdma_fini(rdev); |
cik_irq_fini(rdev); |
sumo_rlc_fini(rdev); |
cik_mec_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_vm_manager_fini(rdev); |
radeon_irq_kms_fini(rdev); |
cik_pcie_gart_fini(rdev); |
rdev->accel_working = false; |
} |
8588,6 → 8911,27 |
*/ |
void cik_fini(struct radeon_device *rdev) |
{ |
radeon_pm_fini(rdev); |
cik_cp_fini(rdev); |
cik_sdma_fini(rdev); |
cik_fini_pg(rdev); |
cik_fini_cg(rdev); |
cik_irq_fini(rdev); |
sumo_rlc_fini(rdev); |
cik_mec_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_vm_manager_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
uvd_v1_0_fini(rdev); |
radeon_uvd_fini(rdev); |
radeon_vce_fini(rdev); |
cik_pcie_gart_fini(rdev); |
r600_vram_scratch_fini(rdev); |
radeon_gem_fini(rdev); |
radeon_fence_driver_fini(rdev); |
radeon_bo_fini(rdev); |
radeon_atombios_fini(rdev); |
kfree(rdev->bios); |
rdev->bios = NULL; |
} |
9205,6 → 9549,9 |
(rdev->disp_priority == 2)) { |
DRM_DEBUG_KMS("force priority to high\n"); |
} |
/* Save number of lines the linebuffer leads before the scanout */ |
radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); |
} |
/* select wm A */ |
/drivers/video/drm/radeon/cik_reg.h |
---|
147,140 → 147,97 |
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c |
#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200 |
#define SQ_IND_INDEX 0x8DE0 |
#define SQ_CMD 0x8DEC |
#define SQ_IND_DATA 0x8DE4 |
/* |
* The TCP_WATCHx_xxxx addresses that are shown here are in dwords, |
* and that's why they are multiplied by 4 |
*/ |
#define TCP_WATCH0_ADDR_H (0x32A0*4) |
#define TCP_WATCH1_ADDR_H (0x32A3*4) |
#define TCP_WATCH2_ADDR_H (0x32A6*4) |
#define TCP_WATCH3_ADDR_H (0x32A9*4) |
#define TCP_WATCH0_ADDR_L (0x32A1*4) |
#define TCP_WATCH1_ADDR_L (0x32A4*4) |
#define TCP_WATCH2_ADDR_L (0x32A7*4) |
#define TCP_WATCH3_ADDR_L (0x32AA*4) |
#define TCP_WATCH0_CNTL (0x32A2*4) |
#define TCP_WATCH1_CNTL (0x32A5*4) |
#define TCP_WATCH2_CNTL (0x32A8*4) |
#define TCP_WATCH3_CNTL (0x32AB*4) |
#define CPC_INT_CNTL 0xC2D0 |
#define CP_HQD_IQ_RPTR 0xC970u |
#define AQL_ENABLE (1U << 0) |
#define SDMA0_RLC0_RB_CNTL 0xD400u |
#define SDMA_RB_VMID(x) (x << 24) |
#define SDMA0_RLC0_RB_BASE 0xD404u |
#define SDMA0_RLC0_RB_BASE_HI 0xD408u |
#define SDMA0_RLC0_RB_RPTR 0xD40Cu |
#define SDMA0_RLC0_RB_WPTR 0xD410u |
#define SDMA0_RLC0_RB_WPTR_POLL_CNTL 0xD414u |
#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0xD418u |
#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0xD41Cu |
#define SDMA0_RLC0_RB_RPTR_ADDR_HI 0xD420u |
#define SDMA0_RLC0_RB_RPTR_ADDR_LO 0xD424u |
#define SDMA0_RLC0_IB_CNTL 0xD428u |
#define SDMA0_RLC0_IB_RPTR 0xD42Cu |
#define SDMA0_RLC0_IB_OFFSET 0xD430u |
#define SDMA0_RLC0_IB_BASE_LO 0xD434u |
#define SDMA0_RLC0_IB_BASE_HI 0xD438u |
#define SDMA0_RLC0_IB_SIZE 0xD43Cu |
#define SDMA0_RLC0_SKIP_CNTL 0xD440u |
#define SDMA0_RLC0_CONTEXT_STATUS 0xD444u |
#define SDMA_RLC_IDLE (1 << 2) |
#define SDMA0_RLC0_DOORBELL 0xD448u |
#define SDMA_OFFSET(x) (x << 0) |
#define SDMA_DB_ENABLE (1 << 28) |
#define SDMA0_RLC0_VIRTUAL_ADDR 0xD49Cu |
#define SDMA_ATC (1 << 0) |
#define SDMA_VA_PTR32 (1 << 4) |
#define SDMA_VA_SHARED_BASE(x) (x << 8) |
#define SDMA0_RLC0_APE1_CNTL 0xD4A0u |
#define SDMA0_RLC0_DOORBELL_LOG 0xD4A4u |
#define SDMA0_RLC0_WATERMARK 0xD4A8u |
#define SDMA0_CNTL 0xD010 |
#define SDMA1_CNTL 0xD810 |
#define IDLE (1 << 2) |
enum { |
MAX_TRAPID = 8, /* 3 bits in the bitfield. */ |
MAX_WATCH_ADDRESSES = 4 |
}; |
struct cik_mqd { |
uint32_t header; |
uint32_t compute_dispatch_initiator; |
uint32_t compute_dim_x; |
uint32_t compute_dim_y; |
uint32_t compute_dim_z; |
uint32_t compute_start_x; |
uint32_t compute_start_y; |
uint32_t compute_start_z; |
uint32_t compute_num_thread_x; |
uint32_t compute_num_thread_y; |
uint32_t compute_num_thread_z; |
uint32_t compute_pipelinestat_enable; |
uint32_t compute_perfcount_enable; |
uint32_t compute_pgm_lo; |
uint32_t compute_pgm_hi; |
uint32_t compute_tba_lo; |
uint32_t compute_tba_hi; |
uint32_t compute_tma_lo; |
uint32_t compute_tma_hi; |
uint32_t compute_pgm_rsrc1; |
uint32_t compute_pgm_rsrc2; |
uint32_t compute_vmid; |
uint32_t compute_resource_limits; |
uint32_t compute_static_thread_mgmt_se0; |
uint32_t compute_static_thread_mgmt_se1; |
uint32_t compute_tmpring_size; |
uint32_t compute_static_thread_mgmt_se2; |
uint32_t compute_static_thread_mgmt_se3; |
uint32_t compute_restart_x; |
uint32_t compute_restart_y; |
uint32_t compute_restart_z; |
uint32_t compute_thread_trace_enable; |
uint32_t compute_misc_reserved; |
uint32_t compute_user_data_0; |
uint32_t compute_user_data_1; |
uint32_t compute_user_data_2; |
uint32_t compute_user_data_3; |
uint32_t compute_user_data_4; |
uint32_t compute_user_data_5; |
uint32_t compute_user_data_6; |
uint32_t compute_user_data_7; |
uint32_t compute_user_data_8; |
uint32_t compute_user_data_9; |
uint32_t compute_user_data_10; |
uint32_t compute_user_data_11; |
uint32_t compute_user_data_12; |
uint32_t compute_user_data_13; |
uint32_t compute_user_data_14; |
uint32_t compute_user_data_15; |
uint32_t cp_compute_csinvoc_count_lo; |
uint32_t cp_compute_csinvoc_count_hi; |
uint32_t cp_mqd_base_addr_lo; |
uint32_t cp_mqd_base_addr_hi; |
uint32_t cp_hqd_active; |
uint32_t cp_hqd_vmid; |
uint32_t cp_hqd_persistent_state; |
uint32_t cp_hqd_pipe_priority; |
uint32_t cp_hqd_queue_priority; |
uint32_t cp_hqd_quantum; |
uint32_t cp_hqd_pq_base_lo; |
uint32_t cp_hqd_pq_base_hi; |
uint32_t cp_hqd_pq_rptr; |
uint32_t cp_hqd_pq_rptr_report_addr_lo; |
uint32_t cp_hqd_pq_rptr_report_addr_hi; |
uint32_t cp_hqd_pq_wptr_poll_addr_lo; |
uint32_t cp_hqd_pq_wptr_poll_addr_hi; |
uint32_t cp_hqd_pq_doorbell_control; |
uint32_t cp_hqd_pq_wptr; |
uint32_t cp_hqd_pq_control; |
uint32_t cp_hqd_ib_base_addr_lo; |
uint32_t cp_hqd_ib_base_addr_hi; |
uint32_t cp_hqd_ib_rptr; |
uint32_t cp_hqd_ib_control; |
uint32_t cp_hqd_iq_timer; |
uint32_t cp_hqd_iq_rptr; |
uint32_t cp_hqd_dequeue_request; |
uint32_t cp_hqd_dma_offload; |
uint32_t cp_hqd_sema_cmd; |
uint32_t cp_hqd_msg_type; |
uint32_t cp_hqd_atomic0_preop_lo; |
uint32_t cp_hqd_atomic0_preop_hi; |
uint32_t cp_hqd_atomic1_preop_lo; |
uint32_t cp_hqd_atomic1_preop_hi; |
uint32_t cp_hqd_hq_status0; |
uint32_t cp_hqd_hq_control0; |
uint32_t cp_mqd_control; |
uint32_t cp_mqd_query_time_lo; |
uint32_t cp_mqd_query_time_hi; |
uint32_t cp_mqd_connect_start_time_lo; |
uint32_t cp_mqd_connect_start_time_hi; |
uint32_t cp_mqd_connect_end_time_lo; |
uint32_t cp_mqd_connect_end_time_hi; |
uint32_t cp_mqd_connect_end_wf_count; |
uint32_t cp_mqd_connect_end_pq_rptr; |
uint32_t cp_mqd_connect_end_pq_wptr; |
uint32_t cp_mqd_connect_end_ib_rptr; |
uint32_t reserved_96; |
uint32_t reserved_97; |
uint32_t reserved_98; |
uint32_t reserved_99; |
uint32_t iqtimer_pkt_header; |
uint32_t iqtimer_pkt_dw0; |
uint32_t iqtimer_pkt_dw1; |
uint32_t iqtimer_pkt_dw2; |
uint32_t iqtimer_pkt_dw3; |
uint32_t iqtimer_pkt_dw4; |
uint32_t iqtimer_pkt_dw5; |
uint32_t iqtimer_pkt_dw6; |
uint32_t reserved_108; |
uint32_t reserved_109; |
uint32_t reserved_110; |
uint32_t reserved_111; |
uint32_t queue_doorbell_id0; |
uint32_t queue_doorbell_id1; |
uint32_t queue_doorbell_id2; |
uint32_t queue_doorbell_id3; |
uint32_t queue_doorbell_id4; |
uint32_t queue_doorbell_id5; |
uint32_t queue_doorbell_id6; |
uint32_t queue_doorbell_id7; |
uint32_t queue_doorbell_id8; |
uint32_t queue_doorbell_id9; |
uint32_t queue_doorbell_id10; |
uint32_t queue_doorbell_id11; |
uint32_t queue_doorbell_id12; |
uint32_t queue_doorbell_id13; |
uint32_t queue_doorbell_id14; |
uint32_t queue_doorbell_id15; |
enum { |
ADDRESS_WATCH_REG_ADDR_HI = 0, |
ADDRESS_WATCH_REG_ADDR_LO, |
ADDRESS_WATCH_REG_CNTL, |
ADDRESS_WATCH_REG_MAX |
}; |
enum { /* not defined in the CI/KV reg file */ |
ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL, |
ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF, |
ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000, |
/* extend the mask to 26 bits in order to match the low address field */ |
ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6, |
ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF |
}; |
union TCP_WATCH_CNTL_BITS { |
struct { |
uint32_t mask:24; |
uint32_t vmid:4; |
uint32_t atc:1; |
uint32_t mode:2; |
uint32_t valid:1; |
} bitfields, bits; |
uint32_t u32All; |
signed int i32All; |
float f32All; |
}; |
#endif |
/drivers/video/drm/radeon/cik_sdma.c |
---|
268,6 → 268,17 |
} |
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; |
rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; |
/* FIXME use something else than big hammer but after few days can not |
* seem to find good combination so reset SDMA blocks as it seems we |
* do not shut them down properly. This fix hibernation and does not |
* affect suspend to ram. |
*/ |
WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1); |
(void)RREG32(SRBM_SOFT_RESET); |
udelay(50); |
WREG32(SRBM_SOFT_RESET, 0); |
(void)RREG32(SRBM_SOFT_RESET); |
} |
/** |
283,6 → 294,33 |
} |
/** |
* cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption |
* |
* @rdev: radeon_device pointer |
* @enable: enable/disable preemption. |
* |
* Halt or unhalt the async dma engines (CIK). |
*/ |
/*
 * cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable preemption.
 *
 * Toggle automatic context switching on both SDMA engines (CIK).
 */
static void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
{
	const uint32_t offsets[2] = {
		SDMA0_REGISTER_OFFSET,
		SDMA1_REGISTER_OFFSET,
	};
	uint32_t cntl;
	int i;

	for (i = 0; i < 2; i++) {
		cntl = RREG32(SDMA0_CNTL + offsets[i]);
		if (enable)
			cntl |= AUTO_CTXSW_ENABLE;
		else
			cntl &= ~AUTO_CTXSW_ENABLE;
		WREG32(SDMA0_CNTL + offsets[i], cntl);
	}
}
/** |
* cik_sdma_enable - stop the async dma engines |
* |
* @rdev: radeon_device pointer |
312,6 → 350,8 |
me_cntl |= SDMA_HALT; |
WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl); |
} |
cik_sdma_ctx_switch_enable(rdev, enable); |
} |
/** |
816,7 → 856,6 |
for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
if (flags & R600_PTE_SYSTEM) { |
value = radeon_vm_map_gart(rdev, addr); |
value &= 0xFFFFFFFFFFFFF000ULL; |
} else if (flags & R600_PTE_VALID) { |
value = addr; |
} else { |
903,6 → 942,9 |
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned vm_id, uint64_t pd_addr) |
{ |
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) | |
SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */ |
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); |
if (vm_id < 8) { |
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2); |
943,5 → 985,12 |
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
radeon_ring_write(ring, 1 << vm_id); |
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 0); /* reference */ |
radeon_ring_write(ring, 0); /* mask */ |
radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ |
} |
/drivers/video/drm/radeon/cikd.h |
---|
482,6 → 482,10 |
#define SOFT_RESET_ORB (1 << 23) |
#define SOFT_RESET_VCE (1 << 24) |
#define SRBM_READ_ERROR 0xE98 |
#define SRBM_INT_CNTL 0xEA0 |
#define SRBM_INT_ACK 0xEA8 |
#define VM_L2_CNTL 0x1400 |
#define ENABLE_L2_CACHE (1 << 0) |
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) |
1331,6 → 1335,7 |
# define CNTX_EMPTY_INT_ENABLE (1 << 20) |
# define PRIV_INSTR_INT_ENABLE (1 << 22) |
# define PRIV_REG_INT_ENABLE (1 << 23) |
# define OPCODE_ERROR_INT_ENABLE (1 << 24) |
# define TIME_STAMP_INT_ENABLE (1 << 26) |
# define CP_RINGID2_INT_ENABLE (1 << 29) |
# define CP_RINGID1_INT_ENABLE (1 << 30) |
2084,6 → 2089,8 |
# define CLK_OD(x) ((x) << 6) |
# define CLK_OD_MASK (0x1f << 6) |
#define UVD_STATUS 0xf6bc |
/* UVD clocks */ |
#define CG_DCLK_CNTL 0xC050009C |
2125,6 → 2132,7 |
#define VCE_UENC_REG_CLOCK_GATING 0x207c0 |
#define VCE_SYS_INT_EN 0x21300 |
# define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) |
#define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c |
#define VCE_LMI_CTRL2 0x21474 |
#define VCE_LMI_CTRL 0x21498 |
#define VCE_LMI_VM_CTRL 0x214a0 |
2140,9 → 2148,12 |
#define VCE_CMD_IB_AUTO 0x00000005 |
#define VCE_CMD_SEMAPHORE 0x00000006 |
#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u |
#define ATC_VMID0_PASID_MAPPING 0x339Cu |
#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u |
#define ATC_VMID_PASID_MAPPING_VALID (1U << 31) |
#define ATC_VMID_PASID_MAPPING_PASID_MASK (0xFFFF) |
#define ATC_VMID_PASID_MAPPING_PASID_SHIFT 0 |
#define ATC_VMID_PASID_MAPPING_VALID_MASK (0x1 << 31) |
#define ATC_VMID_PASID_MAPPING_VALID_SHIFT 31 |
#define ATC_VM_APERTURE0_CNTL 0x3310u |
#define ATS_ACCESS_MODE_NEVER 0 |
2156,4 → 2167,6 |
#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu |
#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u |
#define IH_VMID_0_LUT 0x3D40u |
#endif |
/drivers/video/drm/radeon/cypress_dpm.c |
---|
2005,11 → 2005,13 |
return 0; |
} |
#if 0 |
void cypress_dpm_reset_asic(struct radeon_device *rdev) |
{ |
rv770_restrict_performance_levels_before_switch(rdev); |
rv770_set_boot_state(rdev); |
} |
#endif |
void cypress_dpm_display_configuration_changed(struct radeon_device *rdev) |
{ |
/drivers/video/drm/radeon/dce3_1_afmt.c |
---|
24,37 → 24,17 |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include "r600d.h" |
static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) |
void dce3_2_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, |
u8 *sadb, int sad_count) |
{ |
struct radeon_device *rdev = encoder->dev->dev_private; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
u32 tmp; |
u8 *sadb = NULL; |
int sad_count; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
if (connector->encoder == encoder) { |
radeon_connector = to_radeon_connector(connector); |
break; |
} |
} |
if (!radeon_connector) { |
DRM_ERROR("Couldn't find encoder's connector\n"); |
return; |
} |
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); |
if (sad_count < 0) { |
DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); |
sad_count = 0; |
} |
/* program the speaker allocation */ |
tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); |
tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); |
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); |
/* set HDMI mode */ |
tmp |= HDMI_CONNECTION; |
62,19 → 42,32 |
tmp |= SPEAKER_ALLOCATION(sadb[0]); |
else |
tmp |= SPEAKER_ALLOCATION(5); /* stereo */ |
WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); |
WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); |
} |
kfree(sadb); |
void dce3_2_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, |
u8 *sadb, int sad_count) |
{ |
struct radeon_device *rdev = encoder->dev->dev_private; |
u32 tmp; |
/* program the speaker allocation */ |
tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); |
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); |
/* set DP mode */ |
tmp |= DP_CONNECTION; |
if (sad_count) |
tmp |= SPEAKER_ALLOCATION(sadb[0]); |
else |
tmp |= SPEAKER_ALLOCATION(5); /* stereo */ |
WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); |
} |
static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder) |
void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder, |
struct cea_sad *sads, int sad_count) |
{ |
int i; |
struct radeon_device *rdev = encoder->dev->dev_private; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
struct cea_sad *sads; |
int i, sad_count; |
static const u16 eld_reg_to_type[][2] = { |
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, |
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, |
90,25 → 83,6 |
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, |
}; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
if (connector->encoder == encoder) { |
radeon_connector = to_radeon_connector(connector); |
break; |
} |
} |
if (!radeon_connector) { |
DRM_ERROR("Couldn't find encoder's connector\n"); |
return; |
} |
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); |
if (sad_count < 0) { |
DRM_ERROR("Couldn't read SADs: %d\n", sad_count); |
return; |
} |
BUG_ON(!sads); |
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { |
u32 value = 0; |
u8 stereo_freqs = 0; |
135,110 → 109,124 |
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); |
WREG32(eld_reg_to_type[i][0], value); |
WREG32_ENDPOINT(0, eld_reg_to_type[i][0], value); |
} |
} |
kfree(sads); |
void dce3_2_audio_set_dto(struct radeon_device *rdev, |
struct radeon_crtc *crtc, unsigned int clock) |
{ |
struct radeon_encoder *radeon_encoder; |
struct radeon_encoder_atom_dig *dig; |
unsigned int max_ratio = clock / 24000; |
u32 dto_phase; |
u32 wallclock_ratio; |
u32 dto_cntl; |
if (!crtc) |
return; |
radeon_encoder = to_radeon_encoder(crtc->encoder); |
dig = radeon_encoder->enc_priv; |
if (!dig) |
return; |
if (max_ratio >= 8) { |
dto_phase = 192 * 1000; |
wallclock_ratio = 3; |
} else if (max_ratio >= 4) { |
dto_phase = 96 * 1000; |
wallclock_ratio = 2; |
} else if (max_ratio >= 2) { |
dto_phase = 48 * 1000; |
wallclock_ratio = 1; |
} else { |
dto_phase = 24 * 1000; |
wallclock_ratio = 0; |
} |
/* |
* update the info frames with the data from the current display mode |
/* Express [24MHz / target pixel clock] as an exact rational |
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
*/ |
void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) |
if (dig->dig_encoder == 0) { |
dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); |
WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); |
WREG32(DCCG_AUDIO_DTO0_MODULE, clock); |
WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ |
} else { |
dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl); |
WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase); |
WREG32(DCCG_AUDIO_DTO1_MODULE, clock); |
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ |
} |
} |
void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset, |
const struct radeon_hdmi_acr *acr) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; |
struct hdmi_avi_infoframe frame; |
uint32_t offset; |
ssize_t err; |
if (!dig || !dig->afmt) |
return; |
WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset, |
HDMI0_ACR_SOURCE | /* select SW CTS value */ |
HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ |
/* Silent, r600_hdmi_enable will raise WARN for us */ |
if (!dig->afmt->enabled) |
return; |
offset = dig->afmt->offset; |
WREG32_P(HDMI0_ACR_32_0 + offset, |
HDMI0_ACR_CTS_32(acr->cts_32khz), |
~HDMI0_ACR_CTS_32_MASK); |
WREG32_P(HDMI0_ACR_32_1 + offset, |
HDMI0_ACR_N_32(acr->n_32khz), |
~HDMI0_ACR_N_32_MASK); |
/* disable audio prior to setting up hw */ |
dig->afmt->pin = r600_audio_get_pin(rdev); |
r600_audio_enable(rdev, dig->afmt->pin, 0); |
WREG32_P(HDMI0_ACR_44_0 + offset, |
HDMI0_ACR_CTS_44(acr->cts_44_1khz), |
~HDMI0_ACR_CTS_44_MASK); |
WREG32_P(HDMI0_ACR_44_1 + offset, |
HDMI0_ACR_N_44(acr->n_44_1khz), |
~HDMI0_ACR_N_44_MASK); |
r600_audio_set_dto(encoder, mode->clock); |
WREG32_P(HDMI0_ACR_48_0 + offset, |
HDMI0_ACR_CTS_48(acr->cts_48khz), |
~HDMI0_ACR_CTS_48_MASK); |
WREG32_P(HDMI0_ACR_48_1 + offset, |
HDMI0_ACR_N_48(acr->n_48khz), |
~HDMI0_ACR_N_48_MASK); |
} |
WREG32(HDMI0_VBI_PACKET_CONTROL + offset, |
HDMI0_NULL_SEND); /* send null packets when required */ |
void dce3_2_set_audio_packet(struct drm_encoder *encoder, u32 offset) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000); |
if (ASIC_IS_DCE32(rdev)) { |
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, |
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ |
HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ |
WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, |
AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */ |
AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ |
} else { |
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, |
HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ |
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ |
HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */ |
HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ |
} |
if (ASIC_IS_DCE32(rdev)) { |
dce3_2_afmt_write_speaker_allocation(encoder); |
dce3_2_afmt_write_sad_regs(encoder); |
} |
WREG32(HDMI0_ACR_PACKET_CONTROL + offset, |
HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ |
HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ |
WREG32(HDMI0_VBI_PACKET_CONTROL + offset, |
HDMI0_NULL_SEND | /* send null packets when required */ |
HDMI0_GC_SEND | /* send general control packets */ |
HDMI0_GC_CONT); /* send general control packets every frame */ |
/* TODO: HDMI0_AUDIO_INFO_UPDATE */ |
WREG32(HDMI0_INFOFRAME_CONTROL0 + offset, |
HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ |
HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */ |
WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, |
HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ |
HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */ |
WREG32(HDMI0_INFOFRAME_CONTROL1 + offset, |
HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */ |
WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset, |
HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */ |
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */ |
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); |
if (err < 0) { |
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); |
return; |
} |
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); |
if (err < 0) { |
DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); |
return; |
} |
void dce3_2_set_mute(struct drm_encoder *encoder, u32 offset, bool mute) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer)); |
r600_hdmi_update_ACR(encoder, mode->clock); |
/* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ |
WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF); |
WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF); |
WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001); |
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); |
r600_hdmi_audio_workaround(encoder); |
/* enable audio after to setting up hw */ |
r600_audio_enable(rdev, dig->afmt->pin, 0xf); |
if (mute) |
WREG32_OR(HDMI0_GC + offset, HDMI0_GC_AVMUTE); |
else |
WREG32_AND(HDMI0_GC + offset, ~HDMI0_GC_AVMUTE); |
} |
/drivers/video/drm/radeon/dce6_afmt.c |
---|
23,9 → 23,13 |
#include <linux/hdmi.h> |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_audio.h" |
#include "sid.h" |
static u32 dce6_endpoint_rreg(struct radeon_device *rdev, |
#define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8 |
#define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc |
u32 dce6_endpoint_rreg(struct radeon_device *rdev, |
u32 block_offset, u32 reg) |
{ |
unsigned long flags; |
39,7 → 43,7 |
return r; |
} |
static void dce6_endpoint_wreg(struct radeon_device *rdev, |
void dce6_endpoint_wreg(struct radeon_device *rdev, |
u32 block_offset, u32 reg, u32 v) |
{ |
unsigned long flags; |
54,10 → 58,6 |
spin_unlock_irqrestore(&rdev->end_idx_lock, flags); |
} |
#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg)) |
#define WREG32_ENDPOINT(block, reg, v) dce6_endpoint_wreg(rdev, (block), (reg), (v)) |
static void dce6_afmt_get_connected_pins(struct radeon_device *rdev) |
{ |
int i; |
76,16 → 76,35 |
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev) |
{ |
int i; |
struct drm_encoder *encoder; |
struct radeon_encoder *radeon_encoder; |
struct radeon_encoder_atom_dig *dig; |
struct r600_audio_pin *pin = NULL; |
int i, pin_count; |
dce6_afmt_get_connected_pins(rdev); |
for (i = 0; i < rdev->audio.num_pins; i++) { |
if (rdev->audio.pin[i].connected) |
return &rdev->audio.pin[i]; |
if (rdev->audio.pin[i].connected) { |
pin = &rdev->audio.pin[i]; |
pin_count = 0; |
list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) { |
if (radeon_encoder_is_digital(encoder)) { |
radeon_encoder = to_radeon_encoder(encoder); |
dig = radeon_encoder->enc_priv; |
if (dig->pin == pin) |
pin_count++; |
} |
} |
if (pin_count == 0) |
return pin; |
} |
} |
if (!pin) |
DRM_ERROR("No connected audio pins found!\n"); |
return NULL; |
return pin; |
} |
void dce6_afmt_select_pin(struct drm_encoder *encoder) |
93,44 → 112,26 |
struct radeon_device *rdev = encoder->dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
u32 offset; |
if (!dig || !dig->afmt || !dig->afmt->pin) |
if (!dig || !dig->afmt || !dig->pin) |
return; |
offset = dig->afmt->offset; |
WREG32(AFMT_AUDIO_SRC_CONTROL + offset, |
AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); |
WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, |
AFMT_AUDIO_SRC_SELECT(dig->pin->id)); |
} |
void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, |
struct drm_connector *connector, |
struct drm_display_mode *mode) |
{ |
struct radeon_device *rdev = encoder->dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
u32 tmp = 0, offset; |
u32 tmp = 0; |
if (!dig || !dig->afmt || !dig->afmt->pin) |
if (!dig || !dig->afmt || !dig->pin) |
return; |
offset = dig->afmt->pin->offset; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
if (connector->encoder == encoder) { |
radeon_connector = to_radeon_connector(connector); |
break; |
} |
} |
if (!radeon_connector) { |
DRM_ERROR("Couldn't find encoder's connector\n"); |
return; |
} |
if (mode->flags & DRM_MODE_FLAG_INTERLACE) { |
if (connector->latency_present[1]) |
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | |
144,45 → 145,24 |
else |
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); |
} |
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); |
WREG32_ENDPOINT(dig->pin->offset, |
AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); |
} |
void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) |
void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, |
u8 *sadb, int sad_count) |
{ |
struct radeon_device *rdev = encoder->dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
u32 offset, tmp; |
u8 *sadb = NULL; |
int sad_count; |
u32 tmp; |
if (!dig || !dig->afmt || !dig->afmt->pin) |
if (!dig || !dig->afmt || !dig->pin) |
return; |
offset = dig->afmt->pin->offset; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
if (connector->encoder == encoder) { |
radeon_connector = to_radeon_connector(connector); |
break; |
} |
} |
if (!radeon_connector) { |
DRM_ERROR("Couldn't find encoder's connector\n"); |
return; |
} |
sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); |
if (sad_count < 0) { |
DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); |
sad_count = 0; |
} |
/* program the speaker allocation */ |
tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); |
tmp = RREG32_ENDPOINT(dig->pin->offset, |
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); |
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); |
/* set HDMI mode */ |
tmp |= HDMI_CONNECTION; |
190,22 → 170,42 |
tmp |= SPEAKER_ALLOCATION(sadb[0]); |
else |
tmp |= SPEAKER_ALLOCATION(5); /* stereo */ |
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); |
kfree(sadb); |
WREG32_ENDPOINT(dig->pin->offset, |
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); |
} |
void dce6_afmt_write_sad_regs(struct drm_encoder *encoder) |
void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, |
u8 *sadb, int sad_count) |
{ |
struct radeon_device *rdev = encoder->dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
u32 offset; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
struct cea_sad *sads; |
int i, sad_count; |
u32 tmp; |
if (!dig || !dig->afmt || !dig->pin) |
return; |
/* program the speaker allocation */ |
tmp = RREG32_ENDPOINT(dig->pin->offset, |
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); |
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); |
/* set DP mode */ |
tmp |= DP_CONNECTION; |
if (sad_count) |
tmp |= SPEAKER_ALLOCATION(sadb[0]); |
else |
tmp |= SPEAKER_ALLOCATION(5); /* stereo */ |
WREG32_ENDPOINT(dig->pin->offset, |
AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); |
} |
void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, |
struct cea_sad *sads, int sad_count) |
{ |
int i; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
struct radeon_device *rdev = encoder->dev->dev_private; |
static const u16 eld_reg_to_type[][2] = { |
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, |
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, |
221,30 → 221,9 |
{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, |
}; |
if (!dig || !dig->afmt || !dig->afmt->pin) |
if (!dig || !dig->afmt || !dig->pin) |
return; |
offset = dig->afmt->pin->offset; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
if (connector->encoder == encoder) { |
radeon_connector = to_radeon_connector(connector); |
break; |
} |
} |
if (!radeon_connector) { |
DRM_ERROR("Couldn't find encoder's connector\n"); |
return; |
} |
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); |
if (sad_count <= 0) { |
DRM_ERROR("Couldn't read SADs: %d\n", sad_count); |
return; |
} |
BUG_ON(!sads); |
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { |
u32 value = 0; |
u8 stereo_freqs = 0; |
271,17 → 250,10 |
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); |
WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); |
WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value); |
} |
kfree(sads); |
} |
static int dce6_audio_chipset_supported(struct radeon_device *rdev) |
{ |
return !ASIC_IS_NODCE(rdev); |
} |
void dce6_audio_enable(struct radeon_device *rdev, |
struct r600_audio_pin *pin, |
u8 enable_mask) |
293,64 → 265,46 |
enable_mask ? AUDIO_ENABLED : 0); |
} |
static const u32 pin_offsets[7] = |
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, |
struct radeon_crtc *crtc, unsigned int clock) |
{ |
(0x5e00 - 0x5e00), |
(0x5e18 - 0x5e00), |
(0x5e30 - 0x5e00), |
(0x5e48 - 0x5e00), |
(0x5e60 - 0x5e00), |
(0x5e78 - 0x5e00), |
(0x5e90 - 0x5e00), |
}; |
/* Two dtos; generally use dto0 for HDMI */ |
u32 value = 0; |
int dce6_audio_init(struct radeon_device *rdev) |
{ |
int i; |
if (crtc) |
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
if (!radeon_audio || !dce6_audio_chipset_supported(rdev)) |
return 0; |
WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
rdev->audio.enabled = true; |
if (ASIC_IS_DCE81(rdev)) /* KV: 4 streams, 7 endpoints */ |
rdev->audio.num_pins = 7; |
else if (ASIC_IS_DCE83(rdev)) /* KB: 2 streams, 3 endpoints */ |
rdev->audio.num_pins = 3; |
else if (ASIC_IS_DCE8(rdev)) /* BN/HW: 6 streams, 7 endpoints */ |
rdev->audio.num_pins = 7; |
else if (ASIC_IS_DCE61(rdev)) /* TN: 4 streams, 6 endpoints */ |
rdev->audio.num_pins = 6; |
else if (ASIC_IS_DCE64(rdev)) /* OL: 2 streams, 2 endpoints */ |
rdev->audio.num_pins = 2; |
else /* SI: 6 streams, 6 endpoints */ |
rdev->audio.num_pins = 6; |
for (i = 0; i < rdev->audio.num_pins; i++) { |
rdev->audio.pin[i].channels = -1; |
rdev->audio.pin[i].rate = -1; |
rdev->audio.pin[i].bits_per_sample = -1; |
rdev->audio.pin[i].status_bits = 0; |
rdev->audio.pin[i].category_code = 0; |
rdev->audio.pin[i].connected = false; |
rdev->audio.pin[i].offset = pin_offsets[i]; |
rdev->audio.pin[i].id = i; |
/* disable audio. it will be set up later */ |
dce6_audio_enable(rdev, &rdev->audio.pin[i], false); |
/* Express [24MHz / target pixel clock] as an exact rational |
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
*/ |
WREG32(DCCG_AUDIO_DTO0_PHASE, 24000); |
WREG32(DCCG_AUDIO_DTO0_MODULE, clock); |
} |
return 0; |
} |
void dce6_audio_fini(struct radeon_device *rdev) |
void dce6_dp_audio_set_dto(struct radeon_device *rdev, |
struct radeon_crtc *crtc, unsigned int clock) |
{ |
int i; |
/* Two dtos; generally use dto1 for DP */ |
u32 value = 0; |
value |= DCCG_AUDIO_DTO_SEL; |
if (!rdev->audio.enabled) |
return; |
if (crtc) |
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
for (i = 0; i < rdev->audio.num_pins; i++) |
dce6_audio_enable(rdev, &rdev->audio.pin[i], false); |
WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
rdev->audio.enabled = false; |
/* Express [24MHz / target pixel clock] as an exact rational |
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
*/ |
if (ASIC_IS_DCE8(rdev)) { |
WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); |
WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); |
} else { |
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); |
WREG32(DCCG_AUDIO_DTO1_MODULE, clock); |
} |
} |
/drivers/video/drm/radeon/evergreen.c |
---|
26,6 → 26,7 |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include <drm/radeon_drm.h> |
#include "evergreend.h" |
#include "atom.h" |
34,6 → 35,75 |
#include "evergreen_blit_shaders.h" |
#include "radeon_ucode.h" |
/* |
* Indirect registers accessor |
*/ |
u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->cg_idx_lock, flags); |
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); |
r = RREG32(EVERGREEN_CG_IND_DATA); |
spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); |
return r; |
} |
void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->cg_idx_lock, flags); |
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); |
WREG32(EVERGREEN_CG_IND_DATA, (v)); |
spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); |
} |
u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->pif_idx_lock, flags); |
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); |
r = RREG32(EVERGREEN_PIF_PHY0_DATA); |
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); |
return r; |
} |
void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->pif_idx_lock, flags); |
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); |
WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); |
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); |
} |
u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->pif_idx_lock, flags); |
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); |
r = RREG32(EVERGREEN_PIF_PHY1_DATA); |
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); |
return r; |
} |
void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->pif_idx_lock, flags); |
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); |
WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); |
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); |
} |
static const u32 crtc_offsets[6] = |
{ |
EVERGREEN_CRTC0_REGISTER_OFFSET, |
1005,6 → 1075,34 |
} |
} |
/** |
* evergreen_get_allowed_info_register - fetch the register for the info ioctl |
* |
* @rdev: radeon_device pointer |
* @reg: register offset in bytes |
* @val: register value |
* |
* Returns 0 for success or -EINVAL for an invalid register |
* |
*/ |
int evergreen_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val) |
{ |
switch (reg) { |
case GRBM_STATUS: |
case GRBM_STATUS_SE0: |
case GRBM_STATUS_SE1: |
case SRBM_STATUS: |
case SRBM_STATUS2: |
case DMA_STATUS_REG: |
case UVD_STATUS: |
*val = RREG32(reg); |
return 0; |
default: |
return -EINVAL; |
} |
} |
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, |
unsigned *bankh, unsigned *mtaspect, |
unsigned *tile_split) |
1103,9 → 1201,9 |
return 0; |
} |
// r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000, |
// 16384, 0x03FFFFFF, 0, 128, 5, |
// &fb_div, &vclk_div, &dclk_div); |
r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000, |
16384, 0x03FFFFFF, 0, 128, 5, |
&fb_div, &vclk_div, &dclk_div); |
if (r) |
return r; |
1121,9 → 1219,9 |
mdelay(1); |
// r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
// if (r) |
// return r; |
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
if (r) |
return r; |
/* assert UPLL_RESET again */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK); |
1158,9 → 1256,9 |
/* switch from bypass mode to normal mode */ |
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); |
// r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
// if (r) |
// return r; |
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
if (r) |
return r; |
/* switch VCLK and DCLK selection */ |
WREG32_P(CG_UPLL_FUNC_CNTL_2, |
1306,46 → 1404,22 |
* @crtc_id: crtc to cleanup pageflip on |
* @crtc_base: new address of the crtc (GPU MC address) |
* |
* Does the actual pageflip (evergreen+). |
* During vblank we take the crtc lock and wait for the update_pending |
* bit to go high, when it does, we release the lock, and allow the |
* double buffered update to take place. |
* Returns the current update pending status. |
* Triggers the actual pageflip by updating the primary |
* surface base address (evergreen+). |
*/ |
void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
/* Program a page flip on @crtc_id: latch @crtc_base (GPU MC address) as
 * the new scanout address and wait for the double-buffered surface
 * update to become pending before releasing the update lock.
 * NOTE(review): this body looks like a merge of two driver versions —
 * the older lock/wait/unlock sequence plus the newer posting read that
 * belongs after a bare primary-surface write. Verify against the
 * upstream evergreen.c before relying on the exact sequence. */
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
int i;
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* update the scanout addresses */
/* secondary surface first, then primary (both halves of the 64-bit MC
 * address are written explicitly) */
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(crtc_base));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32)crtc_base);
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(crtc_base));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32)crtc_base);
/* Wait for update_pending to go high. */
/* bounded busy-wait: rdev->usec_timeout iterations of 1us each */
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
break;
udelay(1);
/* post the write */
RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset);
}
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
}
/** |
* evergreen_page_flip_pending - check if page flip is still pending |
* |
2298,6 → 2372,9 |
c.full = dfixed_div(c, a); |
priority_b_mark = dfixed_trunc(c); |
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; |
/* Save number of lines the linebuffer leads before the scanout */ |
radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); |
} |
/* select wm A */ |
3252,6 → 3329,8 |
} |
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
WREG32(SRBM_INT_CNTL, 0x1); |
WREG32(SRBM_INT_ACK, 0x1); |
evergreen_fix_pci_max_read_req_size(rdev); |
4323,6 → 4402,7 |
tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
WREG32(DMA_CNTL, tmp); |
WREG32(GRBM_INT_CNTL, 0); |
WREG32(SRBM_INT_CNTL, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
if (rdev->num_crtc >= 4) { |
4388,12 → 4468,12 |
return 0; |
} |
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
if (rdev->family == CHIP_ARUBA) |
thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) & |
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); |
4482,27 → 4562,27 |
} |
if (rdev->irq.hpd[0]) { |
DRM_DEBUG("evergreen_irq_set: hpd 1\n"); |
hpd1 |= DC_HPDx_INT_EN; |
hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[1]) { |
DRM_DEBUG("evergreen_irq_set: hpd 2\n"); |
hpd2 |= DC_HPDx_INT_EN; |
hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[2]) { |
DRM_DEBUG("evergreen_irq_set: hpd 3\n"); |
hpd3 |= DC_HPDx_INT_EN; |
hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[3]) { |
DRM_DEBUG("evergreen_irq_set: hpd 4\n"); |
hpd4 |= DC_HPDx_INT_EN; |
hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[4]) { |
DRM_DEBUG("evergreen_irq_set: hpd 5\n"); |
hpd5 |= DC_HPDx_INT_EN; |
hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[5]) { |
DRM_DEBUG("evergreen_irq_set: hpd 6\n"); |
hpd6 |= DC_HPDx_INT_EN; |
hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.afmt[0]) { |
DRM_DEBUG("evergreen_irq_set: hdmi 0\n"); |
4589,6 → 4669,9 |
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5); |
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6); |
/* posting read */ |
RREG32(SRBM_STATUS); |
return 0; |
} |
4693,6 → 4776,38 |
tmp |= DC_HPDx_INT_ACK; |
WREG32(DC_HPD6_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD1_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD1_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD2_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD2_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD3_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD3_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD4_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD4_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD5_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD5_INT_CONTROL, tmp); |
} |
/* Ack the HPD6 RX (DP short-pulse) interrupt via read-modify-write of
 * DC_HPD6_INT_CONTROL.
 * Fix: the original read DC_HPD5_INT_CONTROL here (copy-paste error from
 * the HPD5 branch above) and then wrote that value back to the HPD6
 * register, clobbering HPD6's control bits with HPD5's configuration. */
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
	tmp = RREG32(DC_HPD6_INT_CONTROL);
	tmp |= DC_HPDx_RX_INT_ACK;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { |
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); |
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; |
4773,6 → 4888,7 |
u32 ring_index; |
bool queue_hotplug = false; |
bool queue_hdmi = false; |
bool queue_dp = false; |
bool queue_thermal = false; |
u32 status, addr; |
4787,7 → 4903,7 |
return IRQ_NONE; |
rptr = rdev->ih.rptr; |
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); |
DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr); |
/* Order reading of wptr vs. reading of IH ring data */ |
rmb(); |
4805,23 → 4921,27 |
case 1: /* D1 vblank/vline */ |
switch (src_data) { |
case 0: /* D1 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[0]) { |
// drm_handle_vblank(rdev->ddev, 0); |
drm_handle_vblank(rdev->ddev, 0); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[0]) |
// radeon_crtc_handle_flip(rdev, 0); |
if (atomic_read(&rdev->irq.pflip[0])) |
radeon_crtc_handle_vblank(rdev, 0); |
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D1 vblank\n"); |
} |
break; |
case 1: /* D1 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D1 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
4831,23 → 4951,27 |
case 2: /* D2 vblank/vline */ |
switch (src_data) { |
case 0: /* D2 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[1]) { |
// drm_handle_vblank(rdev->ddev, 1); |
drm_handle_vblank(rdev->ddev, 1); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[1]) |
// radeon_crtc_handle_flip(rdev, 1); |
if (atomic_read(&rdev->irq.pflip[1])) |
radeon_crtc_handle_vblank(rdev, 1); |
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D2 vblank\n"); |
} |
break; |
case 1: /* D2 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D2 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
4857,23 → 4981,27 |
case 3: /* D3 vblank/vline */ |
switch (src_data) { |
case 0: /* D3 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[2]) { |
// drm_handle_vblank(rdev->ddev, 2); |
drm_handle_vblank(rdev->ddev, 2); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[2]) |
// radeon_crtc_handle_flip(rdev, 2); |
if (atomic_read(&rdev->irq.pflip[2])) |
radeon_crtc_handle_vblank(rdev, 2); |
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D3 vblank\n"); |
} |
break; |
case 1: /* D3 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D3 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
4883,23 → 5011,27 |
case 4: /* D4 vblank/vline */ |
switch (src_data) { |
case 0: /* D4 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[3]) { |
// drm_handle_vblank(rdev->ddev, 3); |
drm_handle_vblank(rdev->ddev, 3); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[3]) |
// radeon_crtc_handle_flip(rdev, 3); |
if (atomic_read(&rdev->irq.pflip[3])) |
radeon_crtc_handle_vblank(rdev, 3); |
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D4 vblank\n"); |
} |
break; |
case 1: /* D4 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D4 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
4909,23 → 5041,27 |
case 5: /* D5 vblank/vline */ |
switch (src_data) { |
case 0: /* D5 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[4]) { |
// drm_handle_vblank(rdev->ddev, 4); |
drm_handle_vblank(rdev->ddev, 4); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[4]) |
// radeon_crtc_handle_flip(rdev, 4); |
if (atomic_read(&rdev->irq.pflip[4])) |
radeon_crtc_handle_vblank(rdev, 4); |
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D5 vblank\n"); |
} |
break; |
case 1: /* D5 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D5 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
4935,23 → 5071,27 |
case 6: /* D6 vblank/vline */ |
switch (src_data) { |
case 0: /* D6 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[5]) { |
// drm_handle_vblank(rdev->ddev, 5); |
drm_handle_vblank(rdev->ddev, 5); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[5]) |
// radeon_crtc_handle_flip(rdev, 5); |
if (atomic_read(&rdev->irq.pflip[5])) |
radeon_crtc_handle_vblank(rdev, 5); |
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D6 vblank\n"); |
} |
break; |
case 1: /* D6 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D6 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
4969,47 → 5109,101 |
case 42: /* HPD hotplug */ |
switch (src_data) { |
case 0: |
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD1\n"); |
} |
break; |
case 1: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD2\n"); |
} |
break; |
case 2: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD3\n"); |
} |
break; |
case 3: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD4\n"); |
} |
break; |
case 4: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD5\n"); |
} |
break; |
case 5: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD6\n"); |
} |
break; |
case 6: |
if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 1\n"); |
break; |
case 7: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 2\n"); |
break; |
case 8: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 3\n"); |
break; |
case 9: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 4\n"); |
break; |
case 10: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 5\n"); |
break; |
case 11: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 6\n"); |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
break; |
5018,51 → 5212,61 |
case 44: /* hdmi */ |
switch (src_data) { |
case 0: |
if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) { |
if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG; |
queue_hdmi = true; |
DRM_DEBUG("IH: HDMI0\n"); |
} |
break; |
case 1: |
if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) { |
if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG; |
queue_hdmi = true; |
DRM_DEBUG("IH: HDMI1\n"); |
} |
break; |
case 2: |
if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) { |
if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG; |
queue_hdmi = true; |
DRM_DEBUG("IH: HDMI2\n"); |
} |
break; |
case 3: |
if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) { |
if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG; |
queue_hdmi = true; |
DRM_DEBUG("IH: HDMI3\n"); |
} |
break; |
case 4: |
if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) { |
if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG; |
queue_hdmi = true; |
DRM_DEBUG("IH: HDMI4\n"); |
} |
break; |
case 5: |
if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) { |
if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG; |
queue_hdmi = true; |
DRM_DEBUG("IH: HDMI5\n"); |
} |
break; |
default: |
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); |
break; |
} |
case 96: |
DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); |
WREG32(SRBM_INT_ACK, 0x1); |
break; |
case 124: /* UVD */ |
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
5213,13 → 5417,13 |
return r; |
} |
// r = rv770_uvd_resume(rdev); |
// if (!r) { |
// r = radeon_fence_driver_start_ring(rdev, |
// R600_RING_TYPE_UVD_INDEX); |
// if (r) |
// dev_err(rdev->dev, "UVD fences init error (%d).\n", r); |
// } |
r = uvd_v2_2_resume(rdev); |
if (!r) { |
r = radeon_fence_driver_start_ring(rdev, |
R600_RING_TYPE_UVD_INDEX); |
if (r) |
dev_err(rdev->dev, "UVD fences init error (%d).\n", r); |
} |
if (r) |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
5234,7 → 5438,7 |
r = r600_irq_init(rdev); |
if (r) { |
DRM_ERROR("radeon: IH init failed (%d).\n", r); |
// radeon_irq_kms_fini(rdev); |
radeon_irq_kms_fini(rdev); |
return r; |
} |
evergreen_irq_set(rdev); |
5261,7 → 5465,17 |
if (r) |
return r; |
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
if (ring->ring_size) { |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
RADEON_CP_PACKET2); |
if (!r) |
r = uvd_v1_0_init(rdev); |
if (r) |
DRM_ERROR("radeon: error initializing UVD (%d).\n", r); |
} |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
5365,12 → 5579,12 |
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); |
// r = radeon_uvd_init(rdev); |
// if (!r) { |
// rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; |
// r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], |
// 4096); |
// } |
r = radeon_uvd_init(rdev); |
if (!r) { |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], |
4096); |
} |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
5383,6 → 5597,15 |
r = evergreen_startup(rdev); |
if (r) { |
dev_err(rdev->dev, "disabling GPU acceleration\n"); |
r700_cp_fini(rdev); |
r600_dma_fini(rdev); |
r600_irq_fini(rdev); |
if (rdev->flags & RADEON_IS_IGP) |
sumo_rlc_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
evergreen_pcie_gart_fini(rdev); |
rdev->accel_working = false; |
} |
5400,6 → 5623,30 |
return 0; |
} |
/* Tear down an evergreen device: shut down engines and interrupt
 * delivery first, then release memory-management and firmware state,
 * and finally free the cached VBIOS image. The call order mirrors the
 * reverse of init/startup; do not reorder without checking the
 * corresponding *_init paths. */
void evergreen_fini(struct radeon_device *rdev)
{
/* power management and audio stop first, while the hw is still up */
radeon_pm_fini(rdev);
radeon_audio_fini(rdev);
/* stop the CP and DMA engines, then interrupt handling */
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
/* the RLC save/restore buffers exist only on IGP (fusion/sumo) parts */
if (rdev->flags & RADEON_IS_IGP)
sumo_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
/* UVD ring then UVD firmware/bo */
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
/* GART/VRAM bookkeeping and remaining kernel objects */
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
/* free the cached VBIOS copy and clear the pointer so later teardown
 * paths cannot double-free it */
kfree(rdev->bios);
rdev->bios = NULL;
}
void evergreen_pcie_gen2_enable(struct radeon_device *rdev) |
{ |
/drivers/video/drm/radeon/evergreen_blit_shaders.c |
---|
32,7 → 32,7 |
* evergreen cards need to use the 3D engine to blit data which requires |
* quite a bit of hw state setup. Rather than pull the whole 3D driver |
* (which normally generates the 3D state) into the DRM, we opt to use |
* statically generated state tables. The regsiter state and shaders |
* statically generated state tables. The register state and shaders |
* were hand generated to support blitting functionality. See the 3D |
* driver or documentation for descriptions of the registers and |
* shader instructions. |
/drivers/video/drm/radeon/evergreen_cs.c |
---|
34,6 → 34,8 |
#define MAX(a,b) (((a)>(b))?(a):(b)) |
#define MIN(a,b) (((a)<(b))?(a):(b)) |
#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm) |
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, |
struct radeon_bo_list **cs_reloc); |
struct evergreen_cs_track { |
83,6 → 85,8 |
u32 htile_offset; |
u32 htile_surface; |
struct radeon_bo *htile_bo; |
unsigned long indirect_draw_buffer_size; |
const unsigned *reg_safe_bm; |
}; |
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags) |
443,7 → 447,7 |
* command stream. |
*/ |
if (!surf.mode) { |
volatile u32 *ib = p->ib.ptr; |
uint32_t *ib = p->ib.ptr; |
unsigned long tmp, nby, bsize, size, min = 0; |
/* find the height the ddx wants */ |
1082,41 → 1086,18 |
} |
/** |
* evergreen_cs_check_reg() - check if register is authorized or not |
* evergreen_cs_handle_reg() - process registers that need special handling. |
* @parser: parser structure holding parsing context |
* @reg: register we are testing |
* @idx: index into the cs buffer |
* |
* This function will test against evergreen_reg_safe_bm and return 0 |
* if register is safe. If register is not flag as safe this function |
* will test it against a list of register needind special handling. |
*/ |
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) |
static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) |
{ |
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; |
struct radeon_bo_list *reloc; |
u32 last_reg; |
u32 m, i, tmp, *ib; |
u32 tmp, *ib; |
int r; |
if (p->rdev->family >= CHIP_CAYMAN) |
last_reg = ARRAY_SIZE(cayman_reg_safe_bm); |
else |
last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); |
i = (reg >> 7); |
if (i >= last_reg) { |
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
return -EINVAL; |
} |
m = 1 << ((reg >> 2) & 31); |
if (p->rdev->family >= CHIP_CAYMAN) { |
if (!(cayman_reg_safe_bm[i] & m)) |
return 0; |
} else { |
if (!(evergreen_reg_safe_bm[i] & m)) |
return 0; |
} |
ib = p->ib.ptr; |
switch (reg) { |
/* force following reg to 0 in an attempt to disable out buffer |
1763,29 → 1744,27 |
return 0; |
} |
static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) |
/** |
* evergreen_is_safe_reg() - check if register is authorized or not |
* @parser: parser structure holding parsing context |
* @reg: register we are testing |
* |
* This function will test against reg_safe_bm and return true |
* if register is safe or false otherwise. |
*/ |
static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg) |
{ |
u32 last_reg, m, i; |
struct evergreen_cs_track *track = p->track; |
u32 m, i; |
if (p->rdev->family >= CHIP_CAYMAN) |
last_reg = ARRAY_SIZE(cayman_reg_safe_bm); |
else |
last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); |
i = (reg >> 7); |
if (i >= last_reg) { |
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
if (unlikely(i >= REG_SAFE_BM_SIZE)) { |
return false; |
} |
m = 1 << ((reg >> 2) & 31); |
if (p->rdev->family >= CHIP_CAYMAN) { |
if (!(cayman_reg_safe_bm[i] & m)) |
if (!(track->reg_safe_bm[i] & m)) |
return true; |
} else { |
if (!(evergreen_reg_safe_bm[i] & m)) |
return true; |
} |
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); |
return false; |
} |
1794,7 → 1773,7 |
{ |
struct radeon_bo_list *reloc; |
struct evergreen_cs_track *track; |
volatile u32 *ib; |
uint32_t *ib; |
unsigned idx; |
unsigned i; |
unsigned start_reg, end_reg, reg; |
1896,6 → 1875,14 |
} |
break; |
} |
case PACKET3_INDEX_BUFFER_SIZE: |
{ |
if (pkt->count != 0) { |
DRM_ERROR("bad INDEX_BUFFER_SIZE\n"); |
return -EINVAL; |
} |
break; |
} |
case PACKET3_DRAW_INDEX: |
{ |
uint64_t offset; |
2006,6 → 1993,67 |
return r; |
} |
break; |
case PACKET3_SET_BASE: |
{ |
/* |
DW 1 HEADER Header of the packet. Shader_Type in bit 1 of the Header will correspond to the shader type of the Load, see Type-3 Packet. |
2 BASE_INDEX Bits [3:0] BASE_INDEX - Base Index specifies which base address is specified in the last two DWs. |
0001: DX11 Draw_Index_Indirect Patch Table Base: Base address for Draw_Index_Indirect data. |
3 ADDRESS_LO Bits [31:3] - Lower bits of QWORD-Aligned Address. Bits [2:0] - Reserved |
4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32] |
*/ |
if (pkt->count != 2) { |
DRM_ERROR("bad SET_BASE\n"); |
return -EINVAL; |
} |
/* currently only supporting setting indirect draw buffer base address */ |
if (idx_value != 1) { |
DRM_ERROR("bad SET_BASE\n"); |
return -EINVAL; |
} |
r = radeon_cs_packet_next_reloc(p, &reloc, 0); |
if (r) { |
DRM_ERROR("bad SET_BASE\n"); |
return -EINVAL; |
} |
track->indirect_draw_buffer_size = radeon_bo_size(reloc->robj); |
ib[idx+1] = reloc->gpu_offset; |
ib[idx+2] = upper_32_bits(reloc->gpu_offset) & 0xff; |
break; |
} |
case PACKET3_DRAW_INDIRECT: |
case PACKET3_DRAW_INDEX_INDIRECT: |
{ |
u64 size = pkt->opcode == PACKET3_DRAW_INDIRECT ? 16 : 20; |
/* |
DW 1 HEADER |
2 DATA_OFFSET Bits [31:0] + byte aligned offset where the required data structure starts. Bits 1:0 are zero |
3 DRAW_INITIATOR Draw Initiator Register. Written to the VGT_DRAW_INITIATOR register for the assigned context |
*/ |
if (pkt->count != 1) { |
DRM_ERROR("bad DRAW_INDIRECT\n"); |
return -EINVAL; |
} |
if (idx_value + size > track->indirect_draw_buffer_size) { |
dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n", |
idx_value, size, track->indirect_draw_buffer_size); |
return -EINVAL; |
} |
r = evergreen_cs_track_check(p); |
if (r) { |
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); |
return r; |
} |
break; |
} |
case PACKET3_DISPATCH_DIRECT: |
if (pkt->count != 3) { |
DRM_ERROR("bad DISPATCH_DIRECT\n"); |
2251,9 → 2299,10 |
DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); |
return -EINVAL; |
} |
for (i = 0; i < pkt->count; i++) { |
reg = start_reg + (4 * i); |
r = evergreen_cs_check_reg(p, reg, idx+1+i); |
for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) { |
if (evergreen_is_safe_reg(p, reg)) |
continue; |
r = evergreen_cs_handle_reg(p, reg, idx); |
if (r) |
return r; |
} |
2267,9 → 2316,10 |
DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); |
return -EINVAL; |
} |
for (i = 0; i < pkt->count; i++) { |
reg = start_reg + (4 * i); |
r = evergreen_cs_check_reg(p, reg, idx+1+i); |
for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) { |
if (evergreen_is_safe_reg(p, reg)) |
continue; |
r = evergreen_cs_handle_reg(p, reg, idx); |
if (r) |
return r; |
} |
2524,9 → 2574,12 |
} else { |
/* SRC is a reg. */ |
reg = radeon_get_ib_value(p, idx+1) << 2; |
if (!evergreen_is_safe_reg(p, reg, idx+1)) |
if (!evergreen_is_safe_reg(p, reg)) { |
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", |
reg, idx + 1); |
return -EINVAL; |
} |
} |
if (idx_value & 0x2) { |
u64 offset; |
/* DST is memory. */ |
2548,9 → 2601,12 |
} else { |
/* DST is a reg. */ |
reg = radeon_get_ib_value(p, idx+3) << 2; |
if (!evergreen_is_safe_reg(p, reg, idx+3)) |
if (!evergreen_is_safe_reg(p, reg)) { |
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", |
reg, idx + 3); |
return -EINVAL; |
} |
} |
break; |
case PACKET3_NOP: |
break; |
2574,11 → 2630,15 |
if (track == NULL) |
return -ENOMEM; |
evergreen_cs_track_init(track); |
if (p->rdev->family >= CHIP_CAYMAN) |
if (p->rdev->family >= CHIP_CAYMAN) { |
tmp = p->rdev->config.cayman.tile_config; |
else |
track->reg_safe_bm = cayman_reg_safe_bm; |
} else { |
tmp = p->rdev->config.evergreen.tile_config; |
track->reg_safe_bm = evergreen_reg_safe_bm; |
} |
BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE); |
BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE); |
switch (tmp & 0xf) { |
case 0: |
track->npipes = 1; |
2687,7 → 2747,7 |
struct radeon_cs_chunk *ib_chunk = p->chunk_ib; |
struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc; |
u32 header, cmd, count, sub_cmd; |
volatile u32 *ib = p->ib.ptr; |
uint32_t *ib = p->ib.ptr; |
u32 idx; |
u64 src_offset, dst_offset, dst2_offset; |
int r; |
3243,7 → 3303,13 |
switch (pkt->opcode) { |
case PACKET3_NOP: |
break; |
case PACKET3_SET_BASE: |
if (idx_value != 1) { |
DRM_ERROR("bad SET_BASE"); |
return -EINVAL; |
} |
break; |
case PACKET3_CLEAR_STATE: |
case PACKET3_INDEX_BUFFER_SIZE: |
case PACKET3_DISPATCH_DIRECT: |
/drivers/video/drm/radeon/evergreen_hdmi.c |
---|
29,17 → 29,12 |
#include <drm/radeon_drm.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include "evergreend.h" |
#include "atom.h" |
extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder); |
extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder); |
extern void dce6_afmt_select_pin(struct drm_encoder *encoder); |
extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, |
struct drm_display_mode *mode); |
/* enable the audio stream */ |
static void dce4_audio_enable(struct radeon_device *rdev, |
void dce4_audio_enable(struct radeon_device *rdev, |
struct r600_audio_pin *pin, |
u8 enable_mask) |
{ |
69,48 → 64,42 |
WREG32(AZ_HOT_PLUG_CONTROL, tmp); |
} |
/* |
* update the N and CTS parameters for a given pixel clock rate |
*/ |
static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock) |
void evergreen_hdmi_update_acr(struct drm_encoder *encoder, long offset, |
const struct radeon_hdmi_acr *acr) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_hdmi_acr acr = r600_hdmi_acr(clock); |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
uint32_t offset = dig->afmt->offset; |
int bpc = 8; |
WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz)); |
WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz); |
if (encoder->crtc) { |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
bpc = radeon_crtc->bpc; |
} |
WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz)); |
WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz); |
if (bpc > 8) |
WREG32(HDMI_ACR_PACKET_CONTROL + offset, |
HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ |
else |
WREG32(HDMI_ACR_PACKET_CONTROL + offset, |
HDMI_ACR_SOURCE | /* select SW CTS value */ |
HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ |
WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz)); |
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz); |
WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr->cts_32khz)); |
WREG32(HDMI_ACR_32_1 + offset, acr->n_32khz); |
WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr->cts_44_1khz)); |
WREG32(HDMI_ACR_44_1 + offset, acr->n_44_1khz); |
WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr->cts_48khz)); |
WREG32(HDMI_ACR_48_1 + offset, acr->n_48khz); |
} |
static void dce4_afmt_write_latency_fields(struct drm_encoder *encoder, |
struct drm_display_mode *mode) |
void dce4_afmt_write_latency_fields(struct drm_encoder *encoder, |
struct drm_connector *connector, struct drm_display_mode *mode) |
{ |
struct radeon_device *rdev = encoder->dev->dev_private; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
u32 tmp = 0; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
if (connector->encoder == encoder) { |
radeon_connector = to_radeon_connector(connector); |
break; |
} |
} |
if (!radeon_connector) { |
DRM_ERROR("Couldn't find encoder's connector\n"); |
return; |
} |
if (mode->flags & DRM_MODE_FLAG_INTERLACE) { |
if (connector->latency_present[1]) |
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | |
124,38 → 113,17 |
else |
tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255); |
} |
WREG32(AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp); |
WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp); |
} |
static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) |
void dce4_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, |
u8 *sadb, int sad_count) |
{ |
struct radeon_device *rdev = encoder->dev->dev_private; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
u32 tmp; |
u8 *sadb = NULL; |
int sad_count; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
if (connector->encoder == encoder) { |
radeon_connector = to_radeon_connector(connector); |
break; |
} |
} |
if (!radeon_connector) { |
DRM_ERROR("Couldn't find encoder's connector\n"); |
return; |
} |
sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); |
if (sad_count < 0) { |
DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); |
sad_count = 0; |
} |
/* program the speaker allocation */ |
tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); |
tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); |
tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); |
/* set HDMI mode */ |
tmp |= HDMI_CONNECTION; |
163,19 → 131,32 |
tmp |= SPEAKER_ALLOCATION(sadb[0]); |
else |
tmp |= SPEAKER_ALLOCATION(5); /* stereo */ |
WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); |
WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); |
} |
kfree(sadb); |
void dce4_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, |
u8 *sadb, int sad_count) |
{ |
struct radeon_device *rdev = encoder->dev->dev_private; |
u32 tmp; |
/* program the speaker allocation */ |
tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER); |
tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); |
/* set DP mode */ |
tmp |= DP_CONNECTION; |
if (sad_count) |
tmp |= SPEAKER_ALLOCATION(sadb[0]); |
else |
tmp |= SPEAKER_ALLOCATION(5); /* stereo */ |
WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp); |
} |
static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder) |
void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder, |
struct cea_sad *sads, int sad_count) |
{ |
int i; |
struct radeon_device *rdev = encoder->dev->dev_private; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector = NULL; |
struct cea_sad *sads; |
int i, sad_count; |
static const u16 eld_reg_to_type[][2] = { |
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, |
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, |
191,25 → 172,6 |
{ AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, |
}; |
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
if (connector->encoder == encoder) { |
radeon_connector = to_radeon_connector(connector); |
break; |
} |
} |
if (!radeon_connector) { |
DRM_ERROR("Couldn't find encoder's connector\n"); |
return; |
} |
sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); |
if (sad_count <= 0) { |
DRM_ERROR("Couldn't read SADs: %d\n", sad_count); |
return; |
} |
BUG_ON(!sads); |
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { |
u32 value = 0; |
u8 stereo_freqs = 0; |
236,25 → 198,17 |
value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); |
WREG32(eld_reg_to_type[i][0], value); |
WREG32_ENDPOINT(0, eld_reg_to_type[i][0], value); |
} |
kfree(sads); |
} |
/* |
* build a HDMI Video Info Frame |
* build a AVI Info Frame |
*/ |
static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder, |
void *buffer, size_t size) |
void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset, |
unsigned char *buffer, size_t size) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
uint32_t offset = dig->afmt->offset; |
uint8_t *frame = buffer + 3; |
uint8_t *header = buffer; |
WREG32(AFMT_AVI_INFO0 + offset, |
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); |
263,29 → 217,21 |
WREG32(AFMT_AVI_INFO2 + offset, |
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); |
WREG32(AFMT_AVI_INFO3 + offset, |
frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); |
frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); |
WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset, |
HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ |
~HDMI_AVI_INFO_LINE_MASK); |
} |
static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) |
void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, |
struct radeon_crtc *crtc, unsigned int clock) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
u32 base_rate = 24000; |
u32 max_ratio = clock / base_rate; |
unsigned int max_ratio = clock / 24000; |
u32 dto_phase; |
u32 dto_modulo = clock; |
u32 wallclock_ratio; |
u32 dto_cntl; |
u32 value; |
if (!dig || !dig->afmt) |
return; |
if (ASIC_IS_DCE6(rdev)) { |
dto_phase = 24 * 1000; |
} else { |
if (max_ratio >= 8) { |
dto_phase = 192 * 1000; |
wallclock_ratio = 3; |
299,68 → 245,71 |
dto_phase = 24 * 1000; |
wallclock_ratio = 0; |
} |
dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); |
} |
/* XXX two dtos; generally use dto0 for hdmi */ |
value = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
value |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
value &= ~DCCG_AUDIO_DTO1_USE_512FBR_DTO; |
WREG32(DCCG_AUDIO_DTO0_CNTL, value); |
/* Two dtos; generally use dto0 for HDMI */ |
value = 0; |
if (crtc) |
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
/* Express [24MHz / target pixel clock] as an exact rational |
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
*/ |
WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); |
WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); |
WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); |
WREG32(DCCG_AUDIO_DTO0_MODULE, clock); |
} |
/* |
* update the info frames with the data from the current display mode |
*/ |
void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) |
void dce4_dp_audio_set_dto(struct radeon_device *rdev, |
struct radeon_crtc *crtc, unsigned int clock) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; |
struct hdmi_avi_infoframe frame; |
uint32_t offset; |
ssize_t err; |
uint32_t val; |
int bpc = 8; |
u32 value; |
if (!dig || !dig->afmt) |
return; |
value = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
value |= DCCG_AUDIO_DTO1_USE_512FBR_DTO; |
WREG32(DCCG_AUDIO_DTO1_CNTL, value); |
/* Silent, r600_hdmi_enable will raise WARN for us */ |
if (!dig->afmt->enabled) |
return; |
offset = dig->afmt->offset; |
/* Two dtos; generally use dto1 for DP */ |
value = 0; |
value |= DCCG_AUDIO_DTO_SEL; |
/* hdmi deep color mode general control packets setup, if bpc > 8 */ |
if (encoder->crtc) { |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
bpc = radeon_crtc->bpc; |
} |
if (crtc) |
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id); |
/* disable audio prior to setting up hw */ |
if (ASIC_IS_DCE6(rdev)) { |
dig->afmt->pin = dce6_audio_get_pin(rdev); |
dce6_audio_enable(rdev, dig->afmt->pin, 0); |
} else { |
dig->afmt->pin = r600_audio_get_pin(rdev); |
dce4_audio_enable(rdev, dig->afmt->pin, 0); |
WREG32(DCCG_AUDIO_DTO_SOURCE, value); |
/* Express [24MHz / target pixel clock] as an exact rational |
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
*/ |
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); |
WREG32(DCCG_AUDIO_DTO1_MODULE, clock); |
} |
evergreen_audio_set_dto(encoder, mode->clock); |
void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
WREG32(HDMI_VBI_PACKET_CONTROL + offset, |
HDMI_NULL_SEND); /* send null packets when required */ |
HDMI_NULL_SEND | /* send null packets when required */ |
HDMI_GC_SEND | /* send general control packets */ |
HDMI_GC_CONT); /* send general control packets every frame */ |
} |
WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000); |
void dce4_hdmi_set_color_depth(struct drm_encoder *encoder, u32 offset, int bpc) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
uint32_t val; |
val = RREG32(HDMI_CONTROL + offset); |
val &= ~HDMI_DEEP_COLOR_ENABLE; |
390,43 → 339,16 |
} |
WREG32(HDMI_CONTROL + offset, val); |
} |
WREG32(HDMI_VBI_PACKET_CONTROL + offset, |
HDMI_NULL_SEND | /* send null packets when required */ |
HDMI_GC_SEND | /* send general control packets */ |
HDMI_GC_CONT); /* send general control packets every frame */ |
void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
WREG32(HDMI_INFOFRAME_CONTROL0 + offset, |
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ |
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ |
WREG32(AFMT_INFOFRAME_CONTROL0 + offset, |
AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ |
WREG32(HDMI_INFOFRAME_CONTROL1 + offset, |
HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ |
WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */ |
WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, |
HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ |
HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ |
WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, |
AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ |
/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */ |
if (bpc > 8) |
WREG32(HDMI_ACR_PACKET_CONTROL + offset, |
HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ |
else |
WREG32(HDMI_ACR_PACKET_CONTROL + offset, |
HDMI_ACR_SOURCE | /* select SW CTS value */ |
HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ |
evergreen_hdmi_update_ACR(encoder, mode->clock); |
WREG32(AFMT_60958_0 + offset, |
AFMT_60958_CS_CHANNEL_NUMBER_L(1)); |
441,90 → 363,117 |
AFMT_60958_CS_CHANNEL_NUMBER_6(7) | |
AFMT_60958_CS_CHANNEL_NUMBER_7(8)); |
if (ASIC_IS_DCE6(rdev)) { |
dce6_afmt_write_speaker_allocation(encoder); |
} else { |
dce4_afmt_write_speaker_allocation(encoder); |
} |
WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, |
AFMT_AUDIO_CHANNEL_ENABLE(0xff)); |
/* fglrx sets 0x40 in 0x5f80 here */ |
WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, |
HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ |
HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ |
if (ASIC_IS_DCE6(rdev)) { |
dce6_afmt_select_pin(encoder); |
dce6_afmt_write_sad_regs(encoder); |
dce6_afmt_write_latency_fields(encoder, mode); |
} else { |
evergreen_hdmi_write_sad_regs(encoder); |
dce4_afmt_write_latency_fields(encoder, mode); |
/* allow 60958 channel status and send audio packets fields to be updated */ |
WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset, |
AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE); |
} |
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); |
if (err < 0) { |
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); |
return; |
void dce4_set_mute(struct drm_encoder *encoder, u32 offset, bool mute) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
if (mute) |
WREG32_OR(HDMI_GC + offset, HDMI_GC_AVMUTE); |
else |
WREG32_AND(HDMI_GC + offset, ~HDMI_GC_AVMUTE); |
} |
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); |
if (err < 0) { |
DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); |
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
if (!dig || !dig->afmt) |
return; |
} |
evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer)); |
if (enable) { |
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset, |
if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, |
HDMI_AVI_INFO_SEND | /* enable AVI info frames */ |
HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ |
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ |
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ |
WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, |
AFMT_AUDIO_SAMPLE_SEND); |
} else { |
WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, |
HDMI_AVI_INFO_SEND | /* enable AVI info frames */ |
HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */ |
WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, |
~AFMT_AUDIO_SAMPLE_SEND); |
} |
} else { |
WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, |
~AFMT_AUDIO_SAMPLE_SEND); |
WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); |
} |
WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset, |
HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ |
~HDMI_AVI_INFO_LINE_MASK); |
dig->afmt->enabled = enable; |
WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset, |
AFMT_AUDIO_SAMPLE_SEND); /* send audio packets */ |
/* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ |
WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF); |
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF); |
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001); |
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001); |
/* enable audio after to setting up hw */ |
if (ASIC_IS_DCE6(rdev)) |
dce6_audio_enable(rdev, dig->afmt->pin, 1); |
else |
dce4_audio_enable(rdev, dig->afmt->pin, 0xf); |
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n", |
enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); |
} |
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) |
void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
if (!dig || !dig->afmt) |
return; |
/* Silent, r600_hdmi_enable will raise WARN for us */ |
if (enable && dig->afmt->enabled) |
return; |
if (!enable && !dig->afmt->enabled) |
return; |
if (enable && connector && |
drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
struct radeon_connector_atom_dig *dig_connector; |
uint32_t val; |
if (!enable && dig->afmt->pin) { |
if (ASIC_IS_DCE6(rdev)) |
dce6_audio_enable(rdev, dig->afmt->pin, 0); |
WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, |
AFMT_AUDIO_SAMPLE_SEND); |
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) { |
dig_connector = radeon_connector->con_priv; |
val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); |
val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); |
if (dig_connector->dp_clock == 162000) |
val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(3); |
else |
dce4_audio_enable(rdev, dig->afmt->pin, 0); |
dig->afmt->pin = NULL; |
val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5); |
WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val); |
} |
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, |
EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ |
EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ |
EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ |
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
} else { |
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, |
~AFMT_AUDIO_SAMPLE_SEND); |
} |
dig->afmt->enabled = enable; |
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n", |
enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id); |
} |
/drivers/video/drm/radeon/evergreen_reg.h |
---|
251,4 → 251,19 |
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */ |
#define EVERGREEN_HDMI_BASE 0x7030 |
/* Display Port block */ |
#define EVERGREEN_DP_SEC_CNTL 0x7280 |
# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0) |
# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4) |
# define EVERGREEN_DP_SEC_ATP_ENABLE (1 << 8) |
# define EVERGREEN_DP_SEC_AIP_ENABLE (1 << 12) |
# define EVERGREEN_DP_SEC_GSP_ENABLE (1 << 20) |
# define EVERGREEN_DP_SEC_AVI_ENABLE (1 << 24) |
# define EVERGREEN_DP_SEC_MPG_ENABLE (1 << 28) |
#define EVERGREEN_DP_SEC_TIMESTAMP 0x72a4 |
# define EVERGREEN_DP_SEC_TIMESTAMP_MODE(x) (((x) & 0x3) << 0) |
#define EVERGREEN_DP_SEC_AUD_N 0x7294 |
# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24) |
# define EVERGREEN_DP_SEC_SS_EN (1 << 28) |
#endif |
/drivers/video/drm/radeon/evergreend.h |
---|
509,6 → 509,7 |
#define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
#define DCCG_AUDIO_DTO1_LOAD 0x05c8 |
#define DCCG_AUDIO_DTO1_CNTL 0x05cc |
# define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3) |
/* DCE 4.0 AFMT */ |
#define HDMI_CONTROL 0x7030 |
1190,6 → 1191,10 |
#define SOFT_RESET_REGBB (1 << 22) |
#define SOFT_RESET_ORB (1 << 23) |
#define SRBM_READ_ERROR 0xE98 |
#define SRBM_INT_CNTL 0xEA0 |
#define SRBM_INT_ACK 0xEA8 |
/* display watermarks */ |
#define DC_LB_MEMORY_SPLIT 0x6b0c |
#define PRIORITY_A_CNT 0x6b18 |
1515,6 → 1520,7 |
#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54 |
#define UVD_RBC_RB_RPTR 0xf690 |
#define UVD_RBC_RB_WPTR 0xf694 |
#define UVD_STATUS 0xf6bc |
/* |
* PM4 |
/drivers/video/drm/radeon/kv_dpm.c |
---|
1169,6 → 1169,19 |
} |
} |
static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable) |
{ |
u32 thermal_int; |
thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL); |
if (enable) |
thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK; |
else |
thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK); |
WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int); |
} |
int kv_dpm_enable(struct radeon_device *rdev) |
{ |
struct kv_power_info *pi = kv_get_pi(rdev); |
1280,8 → 1293,7 |
DRM_ERROR("kv_set_thermal_temperature_range failed\n"); |
return ret; |
} |
rdev->irq.dpm_thermal = true; |
radeon_irq_set(rdev); |
kv_enable_thermal_int(rdev, true); |
} |
/* powerdown unused blocks for now */ |
1312,6 → 1324,7 |
kv_stop_dpm(rdev); |
kv_enable_ulv(rdev, false); |
kv_reset_am(rdev); |
kv_enable_thermal_int(rdev, false); |
kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps); |
} |
1925,6 → 1938,7 |
kv_init_sclk_t(rdev); |
} |
#if 0 |
void kv_dpm_reset_asic(struct radeon_device *rdev) |
{ |
struct kv_power_info *pi = kv_get_pi(rdev); |
1945,6 → 1959,7 |
kv_set_enabled_level(rdev, pi->graphics_boot_level); |
} |
} |
#endif |
//XXX use sumo_dpm_display_configuration_changed |
2745,13 → 2760,11 |
pi->enable_auto_thermal_throttling = true; |
pi->disable_nb_ps3_in_battery = false; |
if (radeon_bapm == -1) { |
/* There are stability issues reported on with |
* bapm enabled on an asrock system. |
*/ |
if (rdev->pdev->subsystem_vendor == 0x1849) |
/* only enable bapm on KB, ML by default */ |
if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) |
pi->bapm_enable = true; |
else |
pi->bapm_enable = false; |
else |
pi->bapm_enable = true; |
} else if (radeon_bapm == 0) { |
pi->bapm_enable = false; |
} else { |
2807,6 → 2820,29 |
} |
} |
u32 kv_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
struct kv_power_info *pi = kv_get_pi(rdev); |
u32 current_index = |
(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >> |
CURR_SCLK_INDEX_SHIFT; |
u32 sclk; |
if (current_index >= SMU__NUM_SCLK_DPM_STATE) { |
return 0; |
} else { |
sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); |
return sclk; |
} |
} |
u32 kv_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
struct kv_power_info *pi = kv_get_pi(rdev); |
return pi->sys_info.bootup_uma_clk; |
} |
void kv_dpm_print_power_state(struct radeon_device *rdev, |
struct radeon_ps *rps) |
{ |
/drivers/video/drm/radeon/main.c |
---|
5,6 → 5,8 |
#include "radeon.h" |
#include "bitmap.h" |
#define DRV_NAME "atikms v4.4" |
void __init dmi_scan_machine(void); |
#define KMS_DEV_CLOSE 0 |
11,21 → 13,9 |
#define KMS_DEV_INIT 1 |
#define KMS_DEV_READY 2 |
struct pci_device { |
uint16_t domain; |
uint8_t bus; |
uint8_t dev; |
uint8_t func; |
uint16_t vendor_id; |
uint16_t device_id; |
uint16_t subvendor_id; |
uint16_t subdevice_id; |
uint32_t device_class; |
uint8_t revision; |
}; |
struct drm_device *main_device; |
struct drm_file *drm_file_handlers[256]; |
int oops_in_progress; |
videomode_t usermode; |
56,8 → 46,9 |
while(driver_wq_state == KMS_DEV_INIT) |
{ |
jiffies = GetTimerTicks(); |
jiffies_64 = jiffies; |
jiffies_64 = GetClockNs() / 10000000; |
jiffies = (unsigned long)jiffies_64; |
delay(1); |
}; |
134,7 → 125,7 |
if( GetService("DISPLAY") != 0 ) |
return 0; |
printf("Radeon v3.19-rc3 cmdline %s\n", cmdline); |
printf("%s cmdline %s\n",DRV_NAME, cmdline); |
if( cmdline && *cmdline ) |
parse_cmdline(cmdline, &usermode, log, &radeon_modeset); |
144,6 → 135,10 |
printf("Can't open %s\nExit\n", log); |
return 0; |
} |
else |
{ |
dbgprintf("\nLOG: %s build %s %s\n",DRV_NAME,__DATE__, __TIME__); |
} |
cpu_detect1(); |
154,6 → 149,16 |
return 0; |
} |
err = kmap_init(); |
if( unlikely(err != 0) ) |
{ |
dbgprintf("kmap initialization failed\n"); |
return 0; |
} |
dmi_scan_machine(); |
driver_wq_state = KMS_DEV_INIT; |
CreateKernelThread(ati_driver_thread); |
311,7 → 316,7 |
{ |
s64 quot, t; |
quot = div64_u64(abs64(dividend), abs64(divisor)); |
quot = div64_u64(abs(dividend), abs(divisor)); |
t = (dividend ^ divisor) >> 63; |
return (quot ^ t) - t; |
/drivers/video/drm/radeon/ni.c |
---|
27,6 → 27,7 |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include <drm/radeon_drm.h> |
#include "nid.h" |
#include "atom.h" |
35,6 → 36,31 |
#include "radeon_ucode.h" |
#include "clearstate_cayman.h" |
/* |
* Indirect registers accessor |
*/ |
u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->smc_idx_lock, flags); |
WREG32(TN_SMC_IND_INDEX_0, (reg)); |
r = RREG32(TN_SMC_IND_DATA_0); |
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); |
return r; |
} |
void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->smc_idx_lock, flags); |
WREG32(TN_SMC_IND_INDEX_0, (reg)); |
WREG32(TN_SMC_IND_DATA_0, (v)); |
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); |
} |
static const u32 tn_rlc_save_restore_register_list[] = |
{ |
0x98fc, |
827,6 → 853,35 |
return err; |
} |
/** |
* cayman_get_allowed_info_register - fetch the register for the info ioctl |
* |
* @rdev: radeon_device pointer |
* @reg: register offset in bytes |
* @val: register value |
* |
* Returns 0 for success or -EINVAL for an invalid register |
* |
*/ |
int cayman_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val) |
{ |
switch (reg) { |
case GRBM_STATUS: |
case GRBM_STATUS_SE0: |
case GRBM_STATUS_SE1: |
case SRBM_STATUS: |
case SRBM_STATUS2: |
case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET): |
case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET): |
case UVD_STATUS: |
*val = RREG32(reg); |
return 0; |
default: |
return -EINVAL; |
} |
} |
int tn_get_temp(struct radeon_device *rdev) |
{ |
u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff; |
961,6 → 1016,8 |
} |
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
WREG32(SRBM_INT_CNTL, 0x1); |
WREG32(SRBM_INT_ACK, 0x1); |
evergreen_fix_pci_max_read_req_size(rdev); |
1085,12 → 1142,12 |
if ((rdev->config.cayman.max_backends_per_se == 1) && |
(rdev->flags & RADEON_IS_IGP)) { |
if ((disabled_rb_mask & 3) == 1) { |
if ((disabled_rb_mask & 3) == 2) { |
/* RB1 disabled, RB0 enabled */ |
tmp = 0x00000000; |
} else { |
/* RB0 disabled, RB1 enabled */ |
tmp = 0x11111111; |
} else { |
/* RB1 disabled, RB0 enabled */ |
tmp = 0x00000000; |
} |
} else { |
tmp = gb_addr_config & NUM_PIPES_MASK; |
1269,7 → 1326,8 |
*/ |
for (i = 1; i < 8; i++) { |
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); |
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); |
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), |
rdev->vm_manager.max_pfn - 1); |
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), |
rdev->vm_manager.saved_table_addr[i]); |
} |
1328,6 → 1386,13 |
radeon_gart_table_vram_unpin(rdev); |
} |
static void cayman_pcie_gart_fini(struct radeon_device *rdev) |
{ |
cayman_pcie_gart_disable(rdev); |
radeon_gart_table_vram_free(rdev); |
radeon_gart_fini(rdev); |
} |
void cayman_cp_int_cntl_setup(struct radeon_device *rdev, |
int ring, u32 cp_int_cntl) |
{ |
1554,6 → 1619,13 |
return 0; |
} |
static void cayman_cp_fini(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
cayman_cp_enable(rdev, false); |
radeon_ring_fini(rdev, ring); |
radeon_scratch_free(rdev, ring->rptr_save_reg); |
} |
static int cayman_cp_resume(struct radeon_device *rdev) |
{ |
1984,16 → 2056,35 |
return r; |
} |
// r = rv770_uvd_resume(rdev); |
// if (!r) { |
// r = radeon_fence_driver_start_ring(rdev, |
// R600_RING_TYPE_UVD_INDEX); |
// if (r) |
// dev_err(rdev->dev, "UVD fences init error (%d).\n", r); |
// } |
// if (r) |
// rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
r = uvd_v2_2_resume(rdev); |
if (!r) { |
r = radeon_fence_driver_start_ring(rdev, |
R600_RING_TYPE_UVD_INDEX); |
if (r) |
dev_err(rdev->dev, "UVD fences init error (%d).\n", r); |
} |
if (r) |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
if (rdev->family == CHIP_ARUBA) { |
r = radeon_vce_resume(rdev); |
if (!r) |
r = vce_v1_0_resume(rdev); |
if (!r) |
r = radeon_fence_driver_start_ring(rdev, |
TN_RING_TYPE_VCE1_INDEX); |
if (!r) |
r = radeon_fence_driver_start_ring(rdev, |
TN_RING_TYPE_VCE2_INDEX); |
if (r) { |
dev_err(rdev->dev, "VCE init error (%d).\n", r); |
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; |
rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0; |
} |
} |
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
2028,7 → 2119,7 |
r = r600_irq_init(rdev); |
if (r) { |
DRM_ERROR("radeon: IH init failed (%d).\n", r); |
// radeon_irq_kms_fini(rdev); |
radeon_irq_kms_fini(rdev); |
return r; |
} |
evergreen_irq_set(rdev); |
2061,7 → 2152,31 |
if (r) |
return r; |
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
if (ring->ring_size) { |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
RADEON_CP_PACKET2); |
if (!r) |
r = uvd_v1_0_init(rdev); |
if (r) |
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); |
} |
if (rdev->family == CHIP_ARUBA) { |
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; |
if (ring->ring_size) |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); |
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; |
if (ring->ring_size) |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0); |
if (!r) |
r = vce_v1_0_init(rdev); |
if (r) |
DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); |
} |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
2169,13 → 2284,26 |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 64 * 1024); |
// r = radeon_uvd_init(rdev); |
// if (!r) { |
// ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
// ring->ring_obj = NULL; |
// r600_ring_init(rdev, ring, 4096); |
// } |
r = radeon_uvd_init(rdev); |
if (!r) { |
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
} |
if (rdev->family == CHIP_ARUBA) { |
r = radeon_vce_init(rdev); |
if (!r) { |
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
} |
} |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
2187,6 → 2315,16 |
r = cayman_startup(rdev); |
if (r) { |
dev_err(rdev->dev, "disabling GPU acceleration\n"); |
cayman_cp_fini(rdev); |
cayman_dma_fini(rdev); |
r600_irq_fini(rdev); |
if (rdev->flags & RADEON_IS_IGP) |
sumo_rlc_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_vm_manager_fini(rdev); |
radeon_irq_kms_fini(rdev); |
cayman_pcie_gart_fini(rdev); |
rdev->accel_working = false; |
} |
2205,6 → 2343,32 |
return 0; |
} |
/*
 * cayman_fini - full teardown for Cayman/Aruba: stop the CP/DMA/IRQ
 * engines first, then release UVD/VCE, GART and core resources in
 * reverse order of initialization.
 */
void cayman_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	cayman_cp_fini(rdev);
	cayman_dma_fini(rdev);
	r600_irq_fini(rdev);
	/* RLC save/restore buffers are only allocated on IGP parts */
	if (rdev->flags & RADEON_IS_IGP)
		sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	uvd_v1_0_fini(rdev);
	radeon_uvd_fini(rdev);
	/* VCE is only brought up on Aruba (see the init/startup paths) */
	if (rdev->family == CHIP_ARUBA)
		radeon_vce_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;	/* avoid a dangling pointer after free */
}
/* |
* vm |
*/ |
2409,7 → 2573,48 |
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); |
radeon_ring_write(ring, 1 << vm_id); |
/* wait for the invalidate to complete */ |
radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); |
radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ |
WAIT_REG_MEM_ENGINE(0))); /* me */ |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 0); /* ref */ |
radeon_ring_write(ring, 0); /* mask */ |
radeon_ring_write(ring, 0x20); /* poll interval */ |
/* sync PFP to ME, otherwise we might get invalid PFP reads */ |
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
radeon_ring_write(ring, 0x0); |
} |
/*
 * tn_set_vce_clocks - program the VCE engine clock (ECLK) on Trinity/Aruba.
 *
 * @rdev:  radeon_device pointer
 * @evclk: requested VCE clock (unused here; only ECLK is programmed)
 * @ecclk: requested engine clock in 10 kHz units
 *
 * Returns 0 on success, a negative error code from the AtomBIOS divider
 * lookup, or -ETIMEDOUT if ECLK never reports stable.
 *
 * Fix: the address-of operator in the divider call had been mangled into
 * the mojibake "÷rs" (HTML-entity corruption of "&dividers"), which does
 * not compile; restored to &dividers.
 */
int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
{
	struct atom_clock_dividers dividers;
	int r, i;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
					   ecclk, false, &dividers);
	if (r)
		return r;

	/* wait for ECLK to be stable before touching the divider */
	for (i = 0; i < 100; i++) {
		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));

	/* wait for the new divider to take effect */
	for (i = 0; i < 100; i++) {
		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
/drivers/video/drm/radeon/ni_dma.c |
---|
372,7 → 372,6 |
for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
if (flags & R600_PTE_SYSTEM) { |
value = radeon_vm_map_gart(rdev, addr); |
value &= 0xFFFFFFFFFFFFF000ULL; |
} else if (flags & R600_PTE_VALID) { |
value = addr; |
} else { |
463,5 → 462,11 |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); |
radeon_ring_write(ring, 1 << vm_id); |
/* wait for invalidate to complete */ |
radeon_ring_write(ring, DMA_SRBM_READ_PACKET); |
radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2)); |
radeon_ring_write(ring, 0); /* mask */ |
radeon_ring_write(ring, 0); /* value */ |
} |
/drivers/video/drm/radeon/ni_dpm.c |
---|
3862,11 → 3862,13 |
ni_update_current_ps(rdev, new_ps); |
} |
#if 0 |
/*
 * ni_dpm_reset_asic - drop to the lowest performance level, then
 * reprogram the boot state.  Currently compiled out (#if 0 above).
 */
void ni_dpm_reset_asic(struct radeon_device *rdev)
{
	ni_restrict_performance_levels_before_switch(rdev);
	rv770_set_boot_state(rdev);
}
#endif |
union power_info { |
struct _ATOM_POWERPLAY_INFO info; |
4317,6 → 4319,42 |
} |
} |
u32 ni_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct radeon_ps *rps = &eg_pi->current_rps; |
struct ni_ps *ps = ni_get_ps(rps); |
struct rv7xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> |
CURRENT_STATE_INDEX_SHIFT; |
if (current_index >= ps->performance_level_count) { |
return 0; |
} else { |
pl = &ps->performance_levels[current_index]; |
return pl->sclk; |
} |
} |
u32 ni_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct radeon_ps *rps = &eg_pi->current_rps; |
struct ni_ps *ps = ni_get_ps(rps); |
struct rv7xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> |
CURRENT_STATE_INDEX_SHIFT; |
if (current_index >= ps->performance_level_count) { |
return 0; |
} else { |
pl = &ps->performance_levels[current_index]; |
return pl->mclk; |
} |
} |
u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low) |
{ |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
/drivers/video/drm/radeon/ni_reg.h |
---|
83,4 → 83,48 |
# define NI_REGAMMA_PROG_B 4 |
# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4) |
#define NI_DP_MSE_LINK_TIMING 0x73a0 |
# define NI_DP_MSE_LINK_FRAME (((x) & 0x3ff) << 0) |
# define NI_DP_MSE_LINK_LINE (((x) & 0x3) << 16) |
#define NI_DP_MSE_MISC_CNTL 0x736c |
# define NI_DP_MSE_BLANK_CODE (((x) & 0x1) << 0) |
# define NI_DP_MSE_TIMESTAMP_MODE (((x) & 0x1) << 4) |
# define NI_DP_MSE_ZERO_ENCODER (((x) & 0x1) << 8) |
#define NI_DP_MSE_RATE_CNTL 0x7384 |
# define NI_DP_MSE_RATE_Y(x) (((x) & 0x3ffffff) << 0) |
# define NI_DP_MSE_RATE_X(x) (((x) & 0x3f) << 26) |
#define NI_DP_MSE_RATE_UPDATE 0x738c |
#define NI_DP_MSE_SAT0 0x7390 |
# define NI_DP_MSE_SAT_SRC0(x) (((x) & 0x7) << 0) |
# define NI_DP_MSE_SAT_SLOT_COUNT0(x) (((x) & 0x3f) << 8) |
# define NI_DP_MSE_SAT_SRC1(x) (((x) & 0x7) << 16) |
# define NI_DP_MSE_SAT_SLOT_COUNT1(x) (((x) & 0x3f) << 24) |
#define NI_DP_MSE_SAT1 0x7394 |
#define NI_DP_MSE_SAT2 0x7398 |
#define NI_DP_MSE_SAT_UPDATE 0x739c |
#define NI_DIG_BE_CNTL 0x7140 |
# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8) |
# define NI_DIG_FE_DIG_MODE(x) (((x) & 0x7) << 16) |
# define NI_DIG_MODE_DP_SST 0 |
# define NI_DIG_MODE_LVDS 1 |
# define NI_DIG_MODE_TMDS_DVI 2 |
# define NI_DIG_MODE_TMDS_HDMI 3 |
# define NI_DIG_MODE_DP_MST 5 |
# define NI_DIG_HPD_SELECT(x) (((x) & 0x7) << 28) |
#define NI_DIG_FE_CNTL 0x7000 |
# define NI_DIG_SOURCE_SELECT(x) (((x) & 0x3) << 0) |
# define NI_DIG_STEREOSYNC_SELECT(x) (((x) & 0x3) << 4) |
# define NI_DIG_STEREOSYNC_GATE_EN(x) (((x) & 0x1) << 8) |
# define NI_DIG_DUAL_LINK_ENABLE(x) (((x) & 0x1) << 16) |
# define NI_DIG_SWAP(x) (((x) & 0x1) << 18) |
# define NI_DIG_SYMCLK_FE_ON (0x1 << 24) |
#endif |
/drivers/video/drm/radeon/nid.h |
---|
46,6 → 46,13 |
#define DMIF_ADDR_CONFIG 0xBD4 |
/* fusion vce clocks */ |
#define CG_ECLK_CNTL 0x620 |
# define ECLK_DIVIDER_MASK 0x7f |
# define ECLK_DIR_CNTL_EN (1 << 8) |
#define CG_ECLK_STATUS 0x624 |
# define ECLK_STATUS (1 << 0) |
/* DCE6 only */ |
#define DMIF_ADDR_CALC 0xC00 |
82,6 → 89,10 |
#define SOFT_RESET_REGBB (1 << 22) |
#define SOFT_RESET_ORB (1 << 23) |
#define SRBM_READ_ERROR 0xE98 |
#define SRBM_INT_CNTL 0xEA0 |
#define SRBM_INT_ACK 0xEA8 |
#define SRBM_STATUS2 0x0EC4 |
#define DMA_BUSY (1 << 5) |
#define DMA1_BUSY (1 << 6) |
812,6 → 823,52 |
#define MC_PMG_CMD_MRS2 0x2b5c |
#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60 |
#define AUX_CONTROL 0x6200 |
#define AUX_EN (1 << 0) |
#define AUX_LS_READ_EN (1 << 8) |
#define AUX_LS_UPDATE_DISABLE(x) (((x) & 0x1) << 12) |
#define AUX_HPD_DISCON(x) (((x) & 0x1) << 16) |
#define AUX_DET_EN (1 << 18) |
#define AUX_HPD_SEL(x) (((x) & 0x7) << 20) |
#define AUX_IMPCAL_REQ_EN (1 << 24) |
#define AUX_TEST_MODE (1 << 28) |
#define AUX_DEGLITCH_EN (1 << 29) |
#define AUX_SW_CONTROL 0x6204 |
#define AUX_SW_GO (1 << 0) |
#define AUX_LS_READ_TRIG (1 << 2) |
#define AUX_SW_START_DELAY(x) (((x) & 0xf) << 4) |
#define AUX_SW_WR_BYTES(x) (((x) & 0x1f) << 16) |
#define AUX_SW_INTERRUPT_CONTROL 0x620c |
#define AUX_SW_DONE_INT (1 << 0) |
#define AUX_SW_DONE_ACK (1 << 1) |
#define AUX_SW_DONE_MASK (1 << 2) |
#define AUX_SW_LS_DONE_INT (1 << 4) |
#define AUX_SW_LS_DONE_MASK (1 << 6) |
#define AUX_SW_STATUS 0x6210 |
#define AUX_SW_DONE (1 << 0) |
#define AUX_SW_REQ (1 << 1) |
#define AUX_SW_RX_TIMEOUT_STATE(x) (((x) & 0x7) << 4) |
#define AUX_SW_RX_TIMEOUT (1 << 7) |
#define AUX_SW_RX_OVERFLOW (1 << 8) |
#define AUX_SW_RX_HPD_DISCON (1 << 9) |
#define AUX_SW_RX_PARTIAL_BYTE (1 << 10) |
#define AUX_SW_NON_AUX_MODE (1 << 11) |
#define AUX_SW_RX_MIN_COUNT_VIOL (1 << 12) |
#define AUX_SW_RX_INVALID_STOP (1 << 14) |
#define AUX_SW_RX_SYNC_INVALID_L (1 << 17) |
#define AUX_SW_RX_SYNC_INVALID_H (1 << 18) |
#define AUX_SW_RX_INVALID_START (1 << 19) |
#define AUX_SW_RX_RECV_NO_DET (1 << 20) |
#define AUX_SW_RX_RECV_INVALID_H (1 << 22) |
#define AUX_SW_RX_RECV_INVALID_V (1 << 23) |
#define AUX_SW_DATA 0x6218 |
#define AUX_SW_DATA_RW (1 << 0) |
#define AUX_SW_DATA_MASK(x) (((x) & 0xff) << 8) |
#define AUX_SW_DATA_INDEX(x) (((x) & 0x1f) << 16) |
#define AUX_SW_AUTOINCREMENT_DISABLE (1 << 31) |
#define LB_SYNC_RESET_SEL 0x6b28 |
#define LB_SYNC_RESET_SEL_MASK (3 << 0) |
#define LB_SYNC_RESET_SEL_SHIFT 0 |
1082,6 → 1139,7 |
#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 |
#define UVD_RBC_RB_RPTR 0xF690 |
#define UVD_RBC_RB_WPTR 0xF694 |
#define UVD_STATUS 0xf6bc |
/* |
* PM4 |
1133,6 → 1191,23 |
#define PACKET3_MEM_SEMAPHORE 0x39 |
#define PACKET3_MPEG_INDEX 0x3A |
#define PACKET3_WAIT_REG_MEM 0x3C |
#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) |
/* 0 - always |
* 1 - < |
* 2 - <= |
* 3 - == |
* 4 - != |
* 5 - >= |
* 6 - > |
*/ |
#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) |
/* 0 - reg |
* 1 - mem |
*/ |
#define WAIT_REG_MEM_ENGINE(x) ((x) << 8) |
/* 0 - me |
* 1 - pfp |
*/ |
#define PACKET3_MEM_WRITE 0x3D |
#define PACKET3_PFP_SYNC_ME 0x42 |
#define PACKET3_SURFACE_SYNC 0x43 |
1272,6 → 1347,13 |
(1 << 21) | \ |
(((n) & 0xFFFFF) << 0)) |
#define DMA_SRBM_POLL_PACKET ((9 << 28) | \ |
(1 << 27) | \ |
(1 << 26)) |
#define DMA_SRBM_READ_PACKET ((9 << 28) | \ |
(1 << 27)) |
/* async DMA Packet types */ |
#define DMA_PACKET_WRITE 0x2 |
#define DMA_PACKET_COPY 0x3 |
/drivers/video/drm/radeon/pci.c |
---|
1,11 → 1,16 |
#define CONFIG_PCI |
#include <syscall.h> |
#include <linux/kernel.h> |
#include <linux/mutex.h> |
#include <linux/mod_devicetable.h> |
#include <linux/slab.h> |
#include <linux/pm.h> |
#include <linux/pci.h> |
#include <syscall.h> |
extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn); |
static LIST_HEAD(devices); |
372,7 → 377,7 |
int pci_scan_slot(u32 bus, int devfn) |
int _pci_scan_slot(u32 bus, int devfn) |
{ |
int func, nr = 0; |
493,7 → 498,7 |
for(;bus <= last_bus; bus++) |
{ |
for (devfn = 0; devfn < 0x100; devfn += 8) |
pci_scan_slot(bus, devfn); |
_pci_scan_slot(bus, devfn); |
} |
571,7 → 576,7 |
}; |
struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
{ |
pci_dev_t *dev; |
665,7 → 670,7 |
static inline void |
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
_pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, |
struct resource *res) |
{ |
region->start = res->start; |
682,7 → 687,7 |
if (!res->flags) |
return -1; |
pcibios_resource_to_bus(pdev, ®ion, res); |
_pcibios_resource_to_bus(pdev, ®ion, res); |
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); |
rom_addr &= ~PCI_ROM_ADDRESS_MASK; |
rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; |
/drivers/video/drm/radeon/r100.c |
---|
644,6 → 644,7 |
return r; |
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; |
rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
return radeon_gart_table_ram_alloc(rdev); |
} |
681,11 → 682,16 |
WREG32(RADEON_AIC_HI_ADDR, 0); |
} |
/*
 * r100_pci_gart_get_page_entry - encode a DMA address as a PCI GART
 * table entry.  On r100 the entry is simply the address itself; the
 * access flags are not encoded in the entry.
 */
uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	(void)flags;	/* unused: r100 PCI GART entries carry no flag bits */
	return addr;
}
/*
 * r100_pci_gart_set_page - store one pre-encoded GART entry into the
 * CPU-resident PCI GART table.
 *
 * @i:     page index within the table
 * @entry: entry as produced by r100_pci_gart_get_page_entry()
 *
 * The table lives in system RAM, so the 32-bit entry is stored
 * explicitly little-endian.
 */
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
			    uint64_t entry)
{
	u32 *gtt = rdev->gart.ptr;

	gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
void r100_pci_gart_fini(struct radeon_device *rdev) |
722,6 → 728,10 |
tmp |= RADEON_FP2_DETECT_MASK; |
} |
WREG32(RADEON_GEN_INT_CNTL, tmp); |
/* read back to post the write */ |
RREG32(RADEON_GEN_INT_CNTL); |
return 0; |
} |
769,21 → 779,21 |
/* Vertical blank interrupts */ |
if (status & RADEON_CRTC_VBLANK_STAT) { |
if (rdev->irq.crtc_vblank_int[0]) { |
// drm_handle_vblank(rdev->ddev, 0); |
drm_handle_vblank(rdev->ddev, 0); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[0]) |
// radeon_crtc_handle_flip(rdev, 0); |
if (atomic_read(&rdev->irq.pflip[0])) |
radeon_crtc_handle_vblank(rdev, 0); |
} |
if (status & RADEON_CRTC2_VBLANK_STAT) { |
if (rdev->irq.crtc_vblank_int[1]) { |
// drm_handle_vblank(rdev->ddev, 1); |
drm_handle_vblank(rdev->ddev, 1); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[1]) |
// radeon_crtc_handle_flip(rdev, 1); |
if (atomic_read(&rdev->irq.pflip[1])) |
radeon_crtc_handle_vblank(rdev, 1); |
} |
if (status & RADEON_FP_DETECT_STAT) { |
queue_hotplug = true; |
3203,6 → 3213,9 |
uint32_t pixel_bytes1 = 0; |
uint32_t pixel_bytes2 = 0; |
/* Guess line buffer size to be 8192 pixels */ |
u32 lb_size = 8192; |
if (!rdev->mode_info.mode_config_initialized) |
return; |
3617,6 → 3630,13 |
DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", |
(unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); |
} |
/* Save number of lines the linebuffer leads before the scanout */ |
if (mode1) |
rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); |
if (mode2) |
rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); |
} |
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) |
3907,6 → 3927,24 |
return 0; |
} |
/*
 * r100_fini - full teardown for r100-class chips: release acceleration,
 * GART, IRQ, fence, BO and BIOS resources in reverse order of init.
 */
void r100_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	/* a PCI GART table only exists on plain-PCI boards */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;	/* avoid a dangling pointer after free */
}
/* |
* Due to how kexec works, it can leave the hw fully initialised when it |
* boots the new kernel. However doing our init sequence with the CP and |
4006,6 → 4044,10 |
if (r) { |
/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
if (rdev->flags & RADEON_IS_PCI) |
r100_pci_gart_fini(rdev); |
rdev->accel_working = false; |
4013,6 → 4055,28 |
return 0; |
} |
/*
 * r100_mm_rreg_slow - read an MMIO register through the indirect
 * MM_INDEX/MM_DATA window.
 *
 * The index/data register pair is shared hardware state, so the whole
 * select-then-read sequence is serialized with mmio_idx_lock and kept
 * irq-safe.
 */
uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t ret;

	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
	ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
	return ret;
}
/*
 * r100_mm_wreg_slow - write an MMIO register through the indirect
 * MM_INDEX/MM_DATA window; mirror of r100_mm_rreg_slow().
 *
 * The select-then-write sequence must not be interleaved with another
 * indirect access, hence the irq-safe mmio_idx_lock.
 */
void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
	writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) |
{ |
if (reg < rdev->rio_mem_size) |
/drivers/video/drm/radeon/r300.c |
---|
50,6 → 50,31 |
*/ |
/* |
* Indirect registers accessor |
*/ |
/*
 * rv370_pcie_rreg - read a PCIE-block register through the indirect
 * PCIE_INDEX/PCIE_DATA pair; the index is masked with the per-device
 * pcie_reg_mask.  Serialized with the irq-safe pcie_idx_lock.
 */
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
	return r;
}
/*
 * rv370_pcie_wreg - write a PCIE-block register through the indirect
 * PCIE_INDEX/PCIE_DATA pair; mirror of rv370_pcie_rreg().
 */
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}
/* |
* rv370,rv380 PCIE GART |
*/ |
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); |
73,11 → 98,8 |
#define R300_PTE_WRITEABLE (1 << 2) |
#define R300_PTE_READABLE (1 << 3) |
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags) |
uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) |
{ |
void __iomem *ptr = rdev->gart.ptr; |
addr = (lower_32_bits(addr) >> 8) | |
((upper_32_bits(addr) & 0xff) << 24); |
if (flags & RADEON_GART_PAGE_READ) |
86,10 → 108,18 |
addr |= R300_PTE_WRITEABLE; |
if (!(flags & RADEON_GART_PAGE_SNOOP)) |
addr |= R300_PTE_UNSNOOPED; |
return addr; |
} |
/*
 * rv370_pcie_gart_set_page - store one pre-encoded entry into the
 * VRAM-resident PCIE GART table.
 *
 * @i:     page index within the table
 * @entry: entry as produced by rv370_pcie_gart_get_page_entry()
 */
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
			      uint64_t entry)
{
	void __iomem *ptr = rdev->gart.ptr;

	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM - so no need
	 * for cpu_to_le32 on VRAM tables */
	writel(entry, ((void __iomem *)ptr) + (i * 4));
}
int rv370_pcie_gart_init(struct radeon_device *rdev) |
109,6 → 139,7 |
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); |
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; |
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
return radeon_gart_table_vram_alloc(rdev); |
} |
1411,6 → 1442,25 |
/*
 * r300_fini - full teardown for r300-class chips; reverse order of init.
 * Tears down whichever GART flavor the board uses (PCIE vs plain PCI).
 */
void r300_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;	/* avoid a dangling pointer after free */
}
int r300_init(struct radeon_device *rdev) |
{ |
1489,6 → 1539,10 |
if (r) { |
/* Something went wrong with the accel init, so stop accel */ |
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
if (rdev->flags & RADEON_IS_PCIE) |
rv370_pcie_gart_fini(rdev); |
if (rdev->flags & RADEON_IS_PCI) |
/drivers/video/drm/radeon/r420.c |
---|
301,6 → 301,29 |
/*
 * r420_fini - full teardown for r420-class chips; reverse order of init.
 * r420 boards may carry either an ATOM or a COMBIOS video BIOS, so the
 * matching BIOS-table teardown is selected at the end.
 */
void r420_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	if (rdev->is_atom_bios) {
		radeon_atombios_fini(rdev);
	} else {
		radeon_combios_fini(rdev);
	}
	kfree(rdev->bios);
	rdev->bios = NULL;	/* avoid a dangling pointer after free */
}
int r420_init(struct radeon_device *rdev) |
{ |
385,6 → 408,10 |
if (r) { |
/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
if (rdev->flags & RADEON_IS_PCIE) |
rv370_pcie_gart_fini(rdev); |
if (rdev->flags & RADEON_IS_PCI) |
/drivers/video/drm/radeon/r520.c |
---|
292,6 → 292,10 |
if (r) { |
/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
rv370_pcie_gart_fini(rdev); |
rdev->accel_working = false; |
} |
/drivers/video/drm/radeon/r600.c |
---|
33,6 → 33,7 |
#include <drm/radeon_drm.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include "radeon_mode.h" |
#include "r600d.h" |
#include "atom.h" |
107,7 → 108,80 |
extern int evergreen_rlc_resume(struct radeon_device *rdev); |
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); |
/* |
* Indirect registers accessor |
*/ |
/*
 * r600_rcu_rreg - indirect RCU register read: select the register via
 * R600_RCU_INDEX (13-bit index) and read it through R600_RCU_DATA.
 * The shared index/data pair is protected by the irq-safe rcu_idx_lock.
 */
u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	r = RREG32(R600_RCU_DATA);
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
	return r;
}
/*
 * r600_rcu_wreg - indirect RCU register write; mirror of r600_rcu_rreg().
 */
void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	WREG32(R600_RCU_DATA, (v));
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}
/*
 * r600_uvd_ctx_rreg - indirect UVD context register read: select via
 * R600_UVD_CTX_INDEX (9-bit index), read through R600_UVD_CTX_DATA.
 * Serialized with the irq-safe uvd_idx_lock.
 */
u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(R600_UVD_CTX_DATA);
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
	return r;
}
/*
 * r600_uvd_ctx_wreg - indirect UVD context register write; mirror of
 * r600_uvd_ctx_rreg().
 */
void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(R600_UVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}
/** |
* r600_get_allowed_info_register - fetch the register for the info ioctl |
* |
* @rdev: radeon_device pointer |
* @reg: register offset in bytes |
* @val: register value |
* |
* Returns 0 for success or -EINVAL for an invalid register |
* |
*/ |
int r600_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val) |
{ |
switch (reg) { |
case GRBM_STATUS: |
case GRBM_STATUS2: |
case R_000E50_SRBM_STATUS: |
case DMA_STATUS_REG: |
case UVD_STATUS: |
*val = RREG32(reg); |
return 0; |
default: |
return -EINVAL; |
} |
} |
/** |
* r600_get_xclk - get the xclk |
* |
* @rdev: radeon_device pointer |
2996,6 → 3070,18 |
return r; |
} |
if (rdev->has_uvd) { |
r = uvd_v1_0_resume(rdev); |
if (!r) { |
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r); |
} |
} |
if (r) |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
} |
/* Enable IRQ */ |
if (!rdev->irq.installed) { |
r = radeon_irq_kms_init(rdev); |
3006,7 → 3092,7 |
r = r600_irq_init(rdev); |
if (r) { |
DRM_ERROR("radeon: IH init failed (%d).\n", r); |
// radeon_irq_kms_fini(rdev); |
radeon_irq_kms_fini(rdev); |
return r; |
} |
r600_irq_set(rdev); |
3024,6 → 3110,18 |
if (r) |
return r; |
if (rdev->has_uvd) { |
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
if (ring->ring_size) { |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
RADEON_CP_PACKET2); |
if (!r) |
r = uvd_v1_0_init(rdev); |
if (r) |
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); |
} |
} |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
3124,6 → 3222,14 |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
if (rdev->has_uvd) { |
r = radeon_uvd_init(rdev); |
if (!r) { |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096); |
} |
} |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
3135,6 → 3241,11 |
r = r600_startup(rdev); |
if (r) { |
dev_err(rdev->dev, "disabling GPU acceleration\n"); |
r600_cp_fini(rdev); |
r600_irq_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
r600_pcie_gart_fini(rdev); |
rdev->accel_working = false; |
} |
3142,6 → 3253,31 |
return 0; |
} |
/*
 * r600_fini - full teardown for r600-class chips: stop audio/CP/IRQ,
 * release UVD (when present), then the remaining core resources in
 * reverse order of initialization.
 */
void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	radeon_audio_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	/* UVD is only initialized when the ASIC has the block */
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
	}
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;	/* avoid a dangling pointer after free */
}
/* |
* CS stuff |
*/ |
3521,6 → 3657,19 |
return ret; |
} |
/*
 * r600_irq_suspend - quiesce interrupt sources: mask all IRQs and stop
 * the RLC so no further IH events are generated.
 */
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}
/*
 * r600_irq_fini - final IH teardown: suspend interrupt delivery first,
 * then free the IH ring buffer.
 */
void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
int r600_irq_set(struct radeon_device *rdev) |
{ |
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; |
3666,6 → 3815,9 |
WREG32(RV770_CG_THERMAL_INT, thermal_int); |
} |
/* posting read */ |
RREG32(R_000E50_SRBM_STATUS); |
return 0; |
} |
3848,8 → 4000,6 |
* Note, these are based on r600 and may need to be |
* adjusted or added to on newer asics |
*/ |
#undef DRM_DEBUG |
#define DRM_DEBUG(...) |
int r600_irq_process(struct radeon_device *rdev) |
{ |
3894,23 → 4044,27 |
case 1: /* D1 vblank/vline */ |
switch (src_data) { |
case 0: /* D1 vblank */ |
if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[0]) { |
// drm_handle_vblank(rdev->ddev, 0); |
drm_handle_vblank(rdev->ddev, 0); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[0]) |
// radeon_crtc_handle_flip(rdev, 0); |
if (atomic_read(&rdev->irq.pflip[0])) |
radeon_crtc_handle_vblank(rdev, 0); |
rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D1 vblank\n"); |
} |
break; |
case 1: /* D1 vline */ |
if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D1 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
3920,23 → 4074,27 |
case 5: /* D2 vblank/vline */ |
switch (src_data) { |
case 0: /* D2 vblank */ |
if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[1]) { |
// drm_handle_vblank(rdev->ddev, 1); |
drm_handle_vblank(rdev->ddev, 1); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[1]) |
// radeon_crtc_handle_flip(rdev, 1); |
if (atomic_read(&rdev->irq.pflip[1])) |
radeon_crtc_handle_vblank(rdev, 1); |
rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D2 vblank\n"); |
} |
break; |
case 1: /* D1 vline */ |
if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D2 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
3943,49 → 4101,62 |
break; |
} |
break; |
case 9: /* D1 pflip */ |
DRM_DEBUG("IH: D1 flip\n"); |
break; |
case 11: /* D2 pflip */ |
DRM_DEBUG("IH: D2 flip\n"); |
break; |
case 19: /* HPD/DAC hotplug */ |
switch (src_data) { |
case 0: |
if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT)) |
DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD1\n"); |
} |
break; |
case 1: |
if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT)) |
DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD2\n"); |
} |
break; |
case 4: |
if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT)) |
DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD3\n"); |
} |
break; |
case 5: |
if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT)) |
DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD4\n"); |
} |
break; |
case 10: |
if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT)) |
DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD5\n"); |
} |
break; |
case 12: |
if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { |
if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT)) |
DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD6\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
3995,18 → 4166,22 |
case 21: /* hdmi */ |
switch (src_data) { |
case 4: |
if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) { |
if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG)) |
DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG; |
queue_hdmi = true; |
DRM_DEBUG("IH: HDMI0\n"); |
} |
break; |
case 5: |
if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) { |
if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG)) |
DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG; |
queue_hdmi = true; |
DRM_DEBUG("IH: HDMI1\n"); |
} |
break; |
default: |
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); |
/drivers/video/drm/radeon/r600_blit_shaders.c |
---|
32,7 → 32,7 |
* R6xx+ cards need to use the 3D engine to blit data which requires |
* quite a bit of hw state setup. Rather than pull the whole 3D driver |
* (which normally generates the 3D state) into the DRM, we opt to use |
* statically generated state tables. The regsiter state and shaders |
* statically generated state tables. The register state and shaders |
* were hand generated to support blitting functionality. See the 3D |
* driver or documentation for descriptions of the registers and |
* shader instructions. |
/drivers/video/drm/radeon/r600_dpm.c |
---|
188,7 → 188,7 |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
radeon_crtc = to_radeon_crtc(crtc); |
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { |
vrefresh = radeon_crtc->hw_mode.vrefresh; |
vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode); |
break; |
} |
} |
/drivers/video/drm/radeon/r600_hdmi.c |
---|
24,10 → 24,12 |
* Authors: Christian König |
*/ |
#include <linux/hdmi.h> |
#include <linux/gcd.h> |
#include <drm/drmP.h> |
#include <drm/radeon_drm.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include "r600d.h" |
#include "atom.h" |
54,30 → 56,6 |
AUDIO_STATUS_LEVEL = 0x80 |
}; |
static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = { |
/* 32kHz 44.1kHz 48kHz */ |
/* Clock N CTS N CTS N CTS */ |
{ 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25,20/1.001 MHz */ |
{ 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */ |
{ 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */ |
{ 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */ |
{ 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */ |
{ 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */ |
{ 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */ |
{ 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */ |
{ 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */ |
{ 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */ |
}; |
/* |
* check if the chipset is supported |
*/ |
static int r600_audio_chipset_supported(struct radeon_device *rdev) |
{ |
return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev); |
} |
static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev) |
{ |
struct r600_audio_pin status; |
190,43 → 168,6 |
WREG32(AZ_HOT_PLUG_CONTROL, tmp); |
} |
/* |
* initialize the audio vars |
*/ |
int r600_audio_init(struct radeon_device *rdev) |
{ |
if (!radeon_audio || !r600_audio_chipset_supported(rdev)) |
return 0; |
rdev->audio.enabled = true; |
rdev->audio.num_pins = 1; |
rdev->audio.pin[0].channels = -1; |
rdev->audio.pin[0].rate = -1; |
rdev->audio.pin[0].bits_per_sample = -1; |
rdev->audio.pin[0].status_bits = 0; |
rdev->audio.pin[0].category_code = 0; |
rdev->audio.pin[0].id = 0; |
/* disable audio. it will be set up later */ |
r600_audio_enable(rdev, &rdev->audio.pin[0], 0); |
return 0; |
} |
/* |
* release the audio timer |
* TODO: How to do this correctly on SMP systems? |
*/ |
void r600_audio_fini(struct radeon_device *rdev) |
{ |
if (!rdev->audio.enabled) |
return; |
r600_audio_enable(rdev, &rdev->audio.pin[0], 0); |
rdev->audio.enabled = false; |
} |
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev) |
{ |
/* only one pin on 6xx-NI */ |
233,96 → 174,40 |
return &rdev->audio.pin[0]; |
} |
/* |
* calculate CTS and N values if they are not found in the table |
*/ |
static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq) |
void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset, |
const struct radeon_hdmi_acr *acr) |
{ |
int n, cts; |
unsigned long div, mul; |
/* Safe, but overly large values */ |
n = 128 * freq; |
cts = clock * 1000; |
/* Smallest valid fraction */ |
div = gcd(n, cts); |
n /= div; |
cts /= div; |
/* |
* The optimal N is 128*freq/1000. Calculate the closest larger |
* value that doesn't truncate any bits. |
*/ |
mul = ((128*freq/1000) + (n-1))/n; |
n *= mul; |
cts *= mul; |
/* Check that we are in spec (not always possible) */ |
if (n < (128*freq/1500)) |
printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n"); |
if (n > (128*freq/300)) |
printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n"); |
*N = n; |
*CTS = cts; |
DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n", |
*N, *CTS, freq); |
} |
struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock) |
{ |
struct radeon_hdmi_acr res; |
u8 i; |
/* Precalculated values for common clocks */ |
for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) { |
if (r600_hdmi_predefined_acr[i].clock == clock) |
return r600_hdmi_predefined_acr[i]; |
} |
/* And odd clocks get manually calculated */ |
r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000); |
r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100); |
r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000); |
return res; |
} |
/* |
* update the N and CTS parameters for a given pixel clock rate |
*/ |
void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_hdmi_acr acr = r600_hdmi_acr(clock); |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
uint32_t offset = dig->afmt->offset; |
/* DCE 3.0 uses register that's normally for CRC_CONTROL */ |
uint32_t acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL : |
HDMI0_ACR_PACKET_CONTROL; |
WREG32_P(acr_ctl + offset, |
HDMI0_ACR_SOURCE | /* select SW CTS value */ |
HDMI0_ACR_AUTO_SEND, /* allow hw to sent ACR packets when required */ |
~(HDMI0_ACR_SOURCE | |
HDMI0_ACR_AUTO_SEND)); |
WREG32_P(HDMI0_ACR_32_0 + offset, |
HDMI0_ACR_CTS_32(acr.cts_32khz), |
HDMI0_ACR_CTS_32(acr->cts_32khz), |
~HDMI0_ACR_CTS_32_MASK); |
WREG32_P(HDMI0_ACR_32_1 + offset, |
HDMI0_ACR_N_32(acr.n_32khz), |
HDMI0_ACR_N_32(acr->n_32khz), |
~HDMI0_ACR_N_32_MASK); |
WREG32_P(HDMI0_ACR_44_0 + offset, |
HDMI0_ACR_CTS_44(acr.cts_44_1khz), |
HDMI0_ACR_CTS_44(acr->cts_44_1khz), |
~HDMI0_ACR_CTS_44_MASK); |
WREG32_P(HDMI0_ACR_44_1 + offset, |
HDMI0_ACR_N_44(acr.n_44_1khz), |
HDMI0_ACR_N_44(acr->n_44_1khz), |
~HDMI0_ACR_N_44_MASK); |
WREG32_P(HDMI0_ACR_48_0 + offset, |
HDMI0_ACR_CTS_48(acr.cts_48khz), |
HDMI0_ACR_CTS_48(acr->cts_48khz), |
~HDMI0_ACR_CTS_48_MASK); |
WREG32_P(HDMI0_ACR_48_1 + offset, |
HDMI0_ACR_N_48(acr.n_48khz), |
HDMI0_ACR_N_48(acr->n_48khz), |
~HDMI0_ACR_N_48_MASK); |
} |
329,16 → 214,10 |
/* |
* build a HDMI Video Info Frame |
*/ |
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, |
size_t size) |
void r600_set_avi_packet(struct radeon_device *rdev, u32 offset, |
unsigned char *buffer, size_t size) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
uint32_t offset = dig->afmt->offset; |
uint8_t *frame = buffer + 3; |
uint8_t *header = buffer; |
WREG32(HDMI0_AVI_INFO0 + offset, |
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); |
347,7 → 226,15 |
WREG32(HDMI0_AVI_INFO2 + offset, |
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); |
WREG32(HDMI0_AVI_INFO3 + offset, |
frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); |
frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); |
WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset, |
HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */ |
WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, |
HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ |
HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */ |
} |
/* |
424,105 → 311,48 |
value, ~HDMI0_AUDIO_TEST_EN); |
} |
void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) |
void r600_hdmi_audio_set_dto(struct radeon_device *rdev, |
struct radeon_crtc *crtc, unsigned int clock) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
u32 base_rate = 24000; |
u32 max_ratio = clock / base_rate; |
u32 dto_phase; |
u32 dto_modulo = clock; |
u32 wallclock_ratio; |
u32 dto_cntl; |
struct radeon_encoder *radeon_encoder; |
struct radeon_encoder_atom_dig *dig; |
if (!dig || !dig->afmt) |
if (!crtc) |
return; |
if (max_ratio >= 8) { |
dto_phase = 192 * 1000; |
wallclock_ratio = 3; |
} else if (max_ratio >= 4) { |
dto_phase = 96 * 1000; |
wallclock_ratio = 2; |
} else if (max_ratio >= 2) { |
dto_phase = 48 * 1000; |
wallclock_ratio = 1; |
} else { |
dto_phase = 24 * 1000; |
wallclock_ratio = 0; |
} |
radeon_encoder = to_radeon_encoder(crtc->encoder); |
dig = radeon_encoder->enc_priv; |
/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. |
* doesn't matter which one you use. Just use the first one. |
*/ |
/* XXX two dtos; generally use dto0 for hdmi */ |
/* Express [24MHz / target pixel clock] as an exact rational |
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
*/ |
if (ASIC_IS_DCE32(rdev)) { |
if (!dig) |
return; |
if (dig->dig_encoder == 0) { |
dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); |
WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); |
WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); |
WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ |
} else { |
dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl); |
WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase); |
WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); |
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ |
} |
} else { |
/* according to the reg specs, this should DCE3.2 only, but in |
* practice it seems to cover DCE2.0/3.0/3.1 as well. |
*/ |
if (dig->dig_encoder == 0) { |
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); |
WREG32(DCCG_AUDIO_DTO0_PHASE, 24000 * 100); |
WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); |
WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ |
} else { |
WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100); |
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000 * 100); |
WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100); |
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ |
} |
} |
} |
/* |
* update the info frames with the data from the current display mode |
*/ |
void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode) |
void r600_set_vbi_packet(struct drm_encoder *encoder, u32 offset) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; |
struct hdmi_avi_infoframe frame; |
uint32_t offset; |
uint32_t acr_ctl; |
ssize_t err; |
if (!dig || !dig->afmt) |
return; |
WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset, |
HDMI0_NULL_SEND | /* send null packets when required */ |
HDMI0_GC_SEND | /* send general control packets */ |
HDMI0_GC_CONT); /* send general control packets every frame */ |
} |
/* Silent, r600_hdmi_enable will raise WARN for us */ |
if (!dig->afmt->enabled) |
return; |
offset = dig->afmt->offset; |
void r600_set_audio_packet(struct drm_encoder *encoder, u32 offset) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
/* disable audio prior to setting up hw */ |
dig->afmt->pin = r600_audio_get_pin(rdev); |
r600_audio_enable(rdev, dig->afmt->pin, 0xf); |
r600_audio_set_dto(encoder, mode->clock); |
WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset, |
HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ |
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ |
533,51 → 363,14 |
HDMI0_AUDIO_PACKETS_PER_LINE_MASK | |
HDMI0_60958_CS_UPDATE)); |
/* DCE 3.0 uses register that's normally for CRC_CONTROL */ |
acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL : |
HDMI0_ACR_PACKET_CONTROL; |
WREG32_P(acr_ctl + offset, |
HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */ |
HDMI0_ACR_AUTO_SEND, /* allow hw to sent ACR packets when required */ |
~(HDMI0_ACR_SOURCE | |
HDMI0_ACR_AUTO_SEND)); |
WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset, |
HDMI0_NULL_SEND | /* send null packets when required */ |
HDMI0_GC_SEND | /* send general control packets */ |
HDMI0_GC_CONT); /* send general control packets every frame */ |
WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, |
HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ |
HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */ |
HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ |
HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */ |
WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset, |
HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */ |
HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */ |
~(HDMI0_AVI_INFO_LINE_MASK | |
HDMI0_AUDIO_INFO_LINE_MASK)); |
~HDMI0_AUDIO_INFO_LINE_MASK); |
WREG32_AND(HDMI0_GC + offset, |
~HDMI0_GC_AVMUTE); /* unset HDMI0_GC_AVMUTE */ |
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); |
if (err < 0) { |
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); |
return; |
} |
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); |
if (err < 0) { |
DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); |
return; |
} |
r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer)); |
/* fglrx duplicates INFOFRAME_CONTROL0 & INFOFRAME_CONTROL1 ops here */ |
WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset, |
~(HDMI0_GENERIC0_SEND | |
HDMI0_GENERIC0_CONT | |
587,8 → 380,6 |
HDMI0_GENERIC0_LINE_MASK | |
HDMI0_GENERIC1_LINE_MASK)); |
r600_hdmi_update_ACR(encoder, mode->clock); |
WREG32_P(HDMI0_60958_0 + offset, |
HDMI0_60958_CS_CHANNEL_NUMBER_L(1), |
~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK | |
597,15 → 388,17 |
WREG32_P(HDMI0_60958_1 + offset, |
HDMI0_60958_CS_CHANNEL_NUMBER_R(2), |
~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK); |
} |
/* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */ |
WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF); |
WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF); |
WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001); |
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001); |
void r600_set_mute(struct drm_encoder *encoder, u32 offset, bool mute) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
/* enable audio after to setting up hw */ |
r600_audio_enable(rdev, dig->afmt->pin, 0xf); |
if (mute) |
WREG32_OR(HDMI0_GC + offset, HDMI0_GC_AVMUTE); |
else |
WREG32_AND(HDMI0_GC + offset, ~HDMI0_GC_AVMUTE); |
} |
/** |
684,17 → 477,6 |
if (!dig || !dig->afmt) |
return; |
/* Silent, r600_hdmi_enable will raise WARN for us */ |
if (enable && dig->afmt->enabled) |
return; |
if (!enable && !dig->afmt->enabled) |
return; |
if (!enable && dig->afmt->pin) { |
r600_audio_enable(rdev, dig->afmt->pin, 0); |
dig->afmt->pin = NULL; |
} |
/* Older chipsets require setting HDMI and routing manually */ |
if (!ASIC_IS_DCE3(rdev)) { |
if (enable) |
/drivers/video/drm/radeon/radeon.h |
---|
117,22 → 117,10 |
extern int radeon_use_pflipirq; |
extern int radeon_bapm; |
extern int radeon_backlight; |
extern int radeon_auxch; |
extern int radeon_mst; |
typedef struct pm_message { |
int event; |
} pm_message_t; |
typedef struct |
{ |
int width; |
int height; |
int bpp; |
int freq; |
}videomode_t; |
static inline u32 ioread32(const volatile void __iomem *addr) |
{ |
return in32((u32)addr); |
274,6 → 262,7 |
* Dummy page |
*/ |
struct radeon_dummy_page { |
uint64_t entry; |
struct page *page; |
dma_addr_t addr; |
}; |
535,6 → 524,7 |
pid_t pid; |
struct radeon_mn *mn; |
struct list_head mn_list; |
}; |
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) |
675,7 → 665,7 |
unsigned num_cpu_pages; |
unsigned table_size; |
struct page **pages; |
dma_addr_t *pages_addr; |
uint64_t *pages_entry; |
bool ready; |
}; |
746,7 → 736,7 |
resource_size_t size; |
u32 __iomem *ptr; |
u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */ |
unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)]; |
DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS); |
}; |
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page); |
956,6 → 946,9 |
/* BOs freed, but not yet updated in the PT */ |
struct list_head freed; |
/* BOs cleared in the PT */ |
struct list_head cleared; |
/* contains the page directory */ |
struct radeon_bo *page_directory; |
unsigned max_pde_used; |
1582,6 → 1575,7 |
int new_active_crtc_count; |
u32 current_active_crtcs; |
int current_active_crtc_count; |
bool single_display; |
struct radeon_dpm_dynamic_state dyn_state; |
struct radeon_dpm_fan fan; |
u32 tdp_limit; |
1670,6 → 1664,7 |
u8 fan_max_rpm; |
/* dpm */ |
bool dpm_enabled; |
bool sysfs_initialized; |
struct radeon_dpm dpm; |
}; |
1687,7 → 1682,6 |
struct radeon_bo *vcpu_bo; |
void *cpu_addr; |
uint64_t gpu_addr; |
void *saved_bo; |
atomic_t handles[RADEON_MAX_UVD_HANDLES]; |
struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; |
unsigned img_size[RADEON_MAX_UVD_HANDLES]; |
1724,8 → 1718,6 |
* VCE |
*/ |
#define RADEON_MAX_VCE_HANDLES 16 |
#define RADEON_VCE_STACK_SIZE (1024*1024) |
#define RADEON_VCE_HEAP_SIZE (4*1024*1024) |
struct radeon_vce { |
struct radeon_bo *vcpu_bo; |
1736,6 → 1728,7 |
struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; |
unsigned img_size[RADEON_MAX_VCE_HANDLES]; |
struct delayed_work idle_work; |
uint32_t keyselect; |
}; |
int radeon_vce_init(struct radeon_device *rdev); |
1775,6 → 1768,9 |
bool enabled; |
struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS]; |
int num_pins; |
struct radeon_audio_funcs *hdmi_funcs; |
struct radeon_audio_funcs *dp_funcs; |
struct radeon_audio_basic_funcs *funcs; |
}; |
/* |
1795,8 → 1791,16 |
/* |
* MMU Notifier |
*/ |
#if defined(CONFIG_MMU_NOTIFIER) |
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr); |
void radeon_mn_unregister(struct radeon_bo *bo); |
#else |
static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr) |
{ |
return -ENODEV; |
} |
static inline void radeon_mn_unregister(struct radeon_bo *bo) {} |
#endif |
/* |
* Debugfs |
1862,11 → 1866,14 |
u32 (*get_xclk)(struct radeon_device *rdev); |
/* get the gpu clock counter */ |
uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev); |
/* get register for info ioctl */ |
int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val); |
/* gart */ |
struct { |
void (*tlb_flush)(struct radeon_device *rdev); |
uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); |
void (*set_page)(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags); |
uint64_t entry); |
} gart; |
struct { |
int (*init)(struct radeon_device *rdev); |
1985,6 → 1992,12 |
bool (*vblank_too_short)(struct radeon_device *rdev); |
void (*powergate_uvd)(struct radeon_device *rdev, bool gate); |
void (*enable_bapm)(struct radeon_device *rdev, bool enable); |
void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode); |
u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev); |
int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed); |
int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed); |
u32 (*get_current_sclk)(struct radeon_device *rdev); |
u32 (*get_current_mclk)(struct radeon_device *rdev); |
} dpm; |
/* pageflipping */ |
struct { |
2394,6 → 2407,7 |
atomic64_t vram_usage; |
atomic64_t gtt_usage; |
atomic64_t num_bytes_moved; |
atomic_t gpu_reset_counter; |
/* ACPI interface */ |
struct radeon_atif atif; |
struct radeon_atcs atcs; |
2425,6 → 2439,8 |
#define RADEON_MIN_MMIO_SIZE 0x10000 |
uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg); |
void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, |
bool always_indirect) |
{ |
2431,33 → 2447,17 |
/* The mmio size is 64kb at minimum. Allows the if to be optimized out. */ |
if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) |
return readl(((void __iomem *)rdev->rmmio) + reg); |
else { |
unsigned long flags; |
uint32_t ret; |
spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
return ret; |
else |
return r100_mm_rreg_slow(rdev, reg); |
} |
} |
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, |
bool always_indirect) |
{ |
if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) |
writel(v, ((void __iomem *)rdev->rmmio) + reg); |
else { |
unsigned long flags; |
spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
else |
r100_mm_wreg_slow(rdev, reg, v); |
} |
} |
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); |
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
2532,6 → 2532,13 |
tmp_ |= ((val) & ~(mask)); \ |
WREG32_PLL(reg, tmp_); \ |
} while (0) |
#define WREG32_SMC_P(reg, val, mask) \ |
do { \ |
uint32_t tmp_ = RREG32_SMC(reg); \ |
tmp_ &= (mask); \ |
tmp_ |= ((val) & ~(mask)); \ |
WREG32_SMC(reg, tmp_); \ |
} while (0) |
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false)) |
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) |
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) |
2540,185 → 2547,30 |
#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v)) |
/* |
* Indirect registers accessor |
* Indirect registers accessors. |
* They used to be inlined, but this increases code size by ~65 kbytes. |
* Since each performs a pair of MMIO ops |
* within a spin_lock_irqsave/spin_unlock_irqrestore region, |
* the cost of call+ret is almost negligible. MMIO and locking |
* costs several dozens of cycles each at best, call+ret is ~5 cycles. |
*/ |
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) |
{ |
unsigned long flags; |
uint32_t r; |
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg); |
void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg); |
void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg); |
void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg); |
void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg); |
void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg); |
void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg); |
void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
spin_lock_irqsave(&rdev->pcie_idx_lock, flags); |
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); |
r = RREG32(RADEON_PCIE_DATA); |
spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); |
return r; |
} |
static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->pcie_idx_lock, flags); |
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); |
WREG32(RADEON_PCIE_DATA, (v)); |
spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); |
} |
static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->smc_idx_lock, flags); |
WREG32(TN_SMC_IND_INDEX_0, (reg)); |
r = RREG32(TN_SMC_IND_DATA_0); |
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); |
return r; |
} |
static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->smc_idx_lock, flags); |
WREG32(TN_SMC_IND_INDEX_0, (reg)); |
WREG32(TN_SMC_IND_DATA_0, (v)); |
spin_unlock_irqrestore(&rdev->smc_idx_lock, flags); |
} |
static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->rcu_idx_lock, flags); |
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); |
r = RREG32(R600_RCU_DATA); |
spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); |
return r; |
} |
static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->rcu_idx_lock, flags); |
WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); |
WREG32(R600_RCU_DATA, (v)); |
spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags); |
} |
static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->cg_idx_lock, flags); |
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); |
r = RREG32(EVERGREEN_CG_IND_DATA); |
spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); |
return r; |
} |
static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->cg_idx_lock, flags); |
WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff)); |
WREG32(EVERGREEN_CG_IND_DATA, (v)); |
spin_unlock_irqrestore(&rdev->cg_idx_lock, flags); |
} |
static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->pif_idx_lock, flags); |
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); |
r = RREG32(EVERGREEN_PIF_PHY0_DATA); |
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); |
return r; |
} |
static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->pif_idx_lock, flags); |
WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff)); |
WREG32(EVERGREEN_PIF_PHY0_DATA, (v)); |
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); |
} |
static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->pif_idx_lock, flags); |
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); |
r = RREG32(EVERGREEN_PIF_PHY1_DATA); |
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); |
return r; |
} |
static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->pif_idx_lock, flags); |
WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff)); |
WREG32(EVERGREEN_PIF_PHY1_DATA, (v)); |
spin_unlock_irqrestore(&rdev->pif_idx_lock, flags); |
} |
static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->uvd_idx_lock, flags); |
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); |
r = RREG32(R600_UVD_CTX_DATA); |
spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); |
return r; |
} |
static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->uvd_idx_lock, flags); |
WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); |
WREG32(R600_UVD_CTX_DATA, (v)); |
spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags); |
} |
static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg) |
{ |
unsigned long flags; |
u32 r; |
spin_lock_irqsave(&rdev->didt_idx_lock, flags); |
WREG32(CIK_DIDT_IND_INDEX, (reg)); |
r = RREG32(CIK_DIDT_IND_DATA); |
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); |
return r; |
} |
static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v) |
{ |
unsigned long flags; |
spin_lock_irqsave(&rdev->didt_idx_lock, flags); |
WREG32(CIK_DIDT_IND_INDEX, (reg)); |
WREG32(CIK_DIDT_IND_DATA, (v)); |
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags); |
} |
void r100_pll_errata_after_index(struct radeon_device *rdev); |
2829,7 → 2681,8 |
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) |
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) |
#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) |
#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) |
#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) |
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) |
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) |
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) |
2890,6 → 2743,7 |
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) |
#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev)) |
#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev)) |
#define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v)) |
#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev)) |
#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev)) |
#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev)) |
2908,6 → 2762,8 |
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) |
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) |
#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e)) |
#define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev)) |
#define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev)) |
/* Common functions */ |
/* AGP */ |
3074,6 → 2930,7 |
#include "radeon_object.h" |
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 |
#define PCI_VENDOR_ID_ATI 0x1002 |
resource_size_t |
drm_get_resource_start(struct drm_device *dev, unsigned int resource); |
/drivers/video/drm/radeon/radeon_agp.c |
---|
28,7 → 28,7 |
#include "radeon.h" |
#include <drm/radeon_drm.h> |
#if __OS_HAS_AGP |
#if IS_ENABLED(CONFIG_AGP) |
struct radeon_agpmode_quirk { |
u32 hostbridge_vendor; |
54,6 → 54,9 |
/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ |
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, |
PCI_VENDOR_ID_IBM, 0x0550, 1}, |
/* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */ |
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, |
PCI_VENDOR_ID_IBM, 0x054d, 1}, |
/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ |
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, |
PCI_VENDOR_ID_IBM, 0x0530, 1}, |
123,7 → 126,7 |
int radeon_agp_init(struct radeon_device *rdev) |
{ |
#if __OS_HAS_AGP |
#if IS_ENABLED(CONFIG_AGP) |
struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list; |
struct drm_agp_mode mode; |
struct drm_agp_info info; |
257,7 → 260,7 |
void radeon_agp_resume(struct radeon_device *rdev) |
{ |
#if __OS_HAS_AGP |
#if IS_ENABLED(CONFIG_AGP) |
int r; |
if (rdev->flags & RADEON_IS_AGP) { |
r = radeon_agp_init(rdev); |
269,7 → 272,7 |
void radeon_agp_fini(struct radeon_device *rdev) |
{ |
#if __OS_HAS_AGP |
#if IS_ENABLED(CONFIG_AGP) |
if (rdev->ddev->agp && rdev->ddev->agp->acquired) { |
drm_agp_release(rdev->ddev); |
} |
/drivers/video/drm/radeon/radeon_asic.c |
---|
30,8 → 30,7 |
#include <drm/drmP.h> |
#include <drm/drm_crtc_helper.h> |
#include <drm/radeon_drm.h> |
//#include <linux/vgaarb.h> |
//#include <linux/vga_switcheroo.h> |
#include <linux/vgaarb.h> |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "radeon_asic.h" |
136,6 → 135,11 |
} |
} |
static int radeon_invalid_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val) |
{ |
return -EINVAL; |
} |
/* helper to disable agp */ |
/** |
159,11 → 163,13 |
DRM_INFO("Forcing AGP to PCIE mode\n"); |
rdev->flags |= RADEON_IS_PCIE; |
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; |
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
} else { |
DRM_INFO("Forcing AGP to PCI mode\n"); |
rdev->flags |= RADEON_IS_PCI; |
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; |
rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
} |
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
189,7 → 195,7 |
static struct radeon_asic r100_asic = { |
.init = &r100_init, |
// .fini = &r100_fini, |
.fini = &r100_fini, |
// .suspend = &r100_suspend, |
// .resume = &r100_resume, |
// .vga_set_state = &r100_vga_set_state, |
197,8 → 203,10 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r100_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &r100_pci_gart_tlb_flush, |
.get_page_entry = &r100_pci_gart_get_page_entry, |
.set_page = &r100_pci_gart_set_page, |
}, |
.ring = { |
255,7 → 263,7 |
static struct radeon_asic r200_asic = { |
.init = &r100_init, |
// .fini = &r100_fini, |
.fini = &r100_fini, |
// .suspend = &r100_suspend, |
// .resume = &r100_resume, |
// .vga_set_state = &r100_vga_set_state, |
263,8 → 271,10 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r100_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &r100_pci_gart_tlb_flush, |
.get_page_entry = &r100_pci_gart_get_page_entry, |
.set_page = &r100_pci_gart_set_page, |
}, |
.ring = { |
333,9 → 343,23 |
.set_wptr = &r100_gfx_set_wptr, |
}; |
static struct radeon_asic_ring rv515_gfx_ring = { |
.ib_execute = &r100_ring_ib_execute, |
.emit_fence = &r300_fence_ring_emit, |
.emit_semaphore = &r100_semaphore_ring_emit, |
.cs_parse = &r300_cs_parse, |
.ring_start = &rv515_ring_start, |
.ring_test = &r100_ring_test, |
.ib_test = &r100_ib_test, |
.is_lockup = &r100_gpu_is_lockup, |
.get_rptr = &r100_gfx_get_rptr, |
.get_wptr = &r100_gfx_get_wptr, |
.set_wptr = &r100_gfx_set_wptr, |
}; |
static struct radeon_asic r300_asic = { |
.init = &r300_init, |
// .fini = &r300_fini, |
.fini = &r300_fini, |
// .suspend = &r300_suspend, |
// .resume = &r300_resume, |
// .vga_set_state = &r100_vga_set_state, |
343,8 → 367,10 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r300_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &r100_pci_gart_tlb_flush, |
.get_page_entry = &r100_pci_gart_get_page_entry, |
.set_page = &r100_pci_gart_set_page, |
}, |
.ring = { |
401,7 → 427,7 |
static struct radeon_asic r300_asic_pcie = { |
.init = &r300_init, |
// .fini = &r300_fini, |
.fini = &r300_fini, |
// .suspend = &r300_suspend, |
// .resume = &r300_resume, |
// .vga_set_state = &r100_vga_set_state, |
409,8 → 435,10 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r300_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &rv370_pcie_gart_tlb_flush, |
.get_page_entry = &rv370_pcie_gart_get_page_entry, |
.set_page = &rv370_pcie_gart_set_page, |
}, |
.ring = { |
467,7 → 495,7 |
static struct radeon_asic r420_asic = { |
.init = &r420_init, |
// .fini = &r420_fini, |
.fini = &r420_fini, |
// .suspend = &r420_suspend, |
// .resume = &r420_resume, |
// .vga_set_state = &r100_vga_set_state, |
475,8 → 503,10 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r300_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &rv370_pcie_gart_tlb_flush, |
.get_page_entry = &rv370_pcie_gart_get_page_entry, |
.set_page = &rv370_pcie_gart_set_page, |
}, |
.ring = { |
533,7 → 563,7 |
static struct radeon_asic rs400_asic = { |
.init = &rs400_init, |
// .fini = &rs400_fini, |
.fini = &rs400_fini, |
// .suspend = &rs400_suspend, |
// .resume = &rs400_resume, |
// .vga_set_state = &r100_vga_set_state, |
541,8 → 571,10 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &rs400_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &rs400_gart_tlb_flush, |
.get_page_entry = &rs400_gart_get_page_entry, |
.set_page = &rs400_gart_set_page, |
}, |
.ring = { |
599,7 → 631,7 |
static struct radeon_asic rs600_asic = { |
.init = &rs600_init, |
// .fini = &rs600_fini, |
.fini = &rs600_fini, |
// .suspend = &rs600_suspend, |
// .resume = &rs600_resume, |
// .vga_set_state = &r100_vga_set_state, |
607,8 → 639,10 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &rs600_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &rs600_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.ring = { |
624,8 → 658,6 |
.wait_for_vblank = &avivo_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &r600_hdmi_enable, |
.hdmi_setmode = &r600_hdmi_setmode, |
}, |
.copy = { |
.blit = &r100_copy_blit, |
667,7 → 699,7 |
static struct radeon_asic rs690_asic = { |
.init = &rs690_init, |
// .fini = &rs690_fini, |
.fini = &rs690_fini, |
// .suspend = &rs690_suspend, |
// .resume = &rs690_resume, |
// .vga_set_state = &r100_vga_set_state, |
675,8 → 707,10 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &rs690_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &rs400_gart_tlb_flush, |
.get_page_entry = &rs400_gart_get_page_entry, |
.set_page = &rs400_gart_set_page, |
}, |
.ring = { |
692,8 → 726,6 |
.wait_for_vblank = &avivo_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &r600_hdmi_enable, |
.hdmi_setmode = &r600_hdmi_setmode, |
}, |
.copy = { |
.blit = &r100_copy_blit, |
735,7 → 767,7 |
static struct radeon_asic rv515_asic = { |
.init = &rv515_init, |
// .fini = &rv515_fini, |
.fini = &rv515_fini, |
// .suspend = &rv515_suspend, |
// .resume = &rv515_resume, |
// .vga_set_state = &r100_vga_set_state, |
743,12 → 775,14 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &rv515_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &rv370_pcie_gart_tlb_flush, |
.get_page_entry = &rv370_pcie_gart_get_page_entry, |
.set_page = &rv370_pcie_gart_set_page, |
}, |
.ring = { |
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring |
[RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring |
}, |
.irq = { |
.set = &rs600_irq_set, |
801,7 → 835,7 |
static struct radeon_asic r520_asic = { |
.init = &r520_init, |
// .fini = &rv515_fini, |
.fini = &rv515_fini, |
// .suspend = &rv515_suspend, |
// .resume = &r520_resume, |
// .vga_set_state = &r100_vga_set_state, |
809,12 → 843,14 |
.mmio_hdp_flush = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r520_mc_wait_for_idle, |
.get_allowed_info_register = radeon_invalid_get_allowed_info_register, |
.gart = { |
.tlb_flush = &rv370_pcie_gart_tlb_flush, |
.get_page_entry = &rv370_pcie_gart_get_page_entry, |
.set_page = &rv370_pcie_gart_set_page, |
}, |
.ring = { |
[RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring |
[RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring |
}, |
.irq = { |
.set = &rs600_irq_set, |
893,7 → 929,7 |
static struct radeon_asic r600_asic = { |
.init = &r600_init, |
// .fini = &r600_fini, |
.fini = &r600_fini, |
// .suspend = &r600_suspend, |
// .resume = &r600_resume, |
// .vga_set_state = &r600_vga_set_state, |
903,8 → 939,10 |
.mc_wait_for_idle = &r600_mc_wait_for_idle, |
.get_xclk = &r600_get_xclk, |
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
.get_allowed_info_register = r600_get_allowed_info_register, |
.gart = { |
.tlb_flush = &r600_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.ring = { |
921,8 → 959,6 |
.wait_for_vblank = &avivo_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &r600_hdmi_enable, |
.hdmi_setmode = &r600_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
963,9 → 999,22 |
}, |
}; |
static struct radeon_asic_ring rv6xx_uvd_ring = { |
.ib_execute = &uvd_v1_0_ib_execute, |
.emit_fence = &uvd_v1_0_fence_emit, |
.emit_semaphore = &uvd_v1_0_semaphore_emit, |
.cs_parse = &radeon_uvd_cs_parse, |
.ring_test = &uvd_v1_0_ring_test, |
.ib_test = &uvd_v1_0_ib_test, |
.is_lockup = &radeon_ring_test_lockup, |
.get_rptr = &uvd_v1_0_get_rptr, |
.get_wptr = &uvd_v1_0_get_wptr, |
.set_wptr = &uvd_v1_0_set_wptr, |
}; |
static struct radeon_asic rv6xx_asic = { |
.init = &r600_init, |
// .fini = &r600_fini, |
.fini = &r600_fini, |
// .suspend = &r600_suspend, |
// .resume = &r600_resume, |
// .vga_set_state = &r600_vga_set_state, |
975,13 → 1024,16 |
.mc_wait_for_idle = &r600_mc_wait_for_idle, |
.get_xclk = &r600_get_xclk, |
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
.get_allowed_info_register = r600_get_allowed_info_register, |
.gart = { |
.tlb_flush = &r600_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.ring = { |
[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring, |
[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring, |
[R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring, |
}, |
.irq = { |
.set = &r600_irq_set, |
993,8 → 1045,6 |
.wait_for_vblank = &avivo_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &r600_hdmi_enable, |
.hdmi_setmode = &r600_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
1046,6 → 1096,8 |
.print_power_state = &rv6xx_dpm_print_power_state, |
.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level, |
.force_performance_level = &rv6xx_dpm_force_performance_level, |
.get_current_sclk = &rv6xx_dpm_get_current_sclk, |
.get_current_mclk = &rv6xx_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &rs600_pre_page_flip, |
1055,7 → 1107,7 |
static struct radeon_asic rs780_asic = { |
.init = &r600_init, |
// .fini = &r600_fini, |
.fini = &r600_fini, |
// .suspend = &r600_suspend, |
// .resume = &r600_resume, |
// .vga_set_state = &r600_vga_set_state, |
1065,13 → 1117,16 |
.mc_wait_for_idle = &r600_mc_wait_for_idle, |
.get_xclk = &r600_get_xclk, |
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
.get_allowed_info_register = r600_get_allowed_info_register, |
.gart = { |
.tlb_flush = &r600_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.ring = { |
[RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring, |
[R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring, |
[R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring, |
}, |
.irq = { |
.set = &r600_irq_set, |
1083,8 → 1138,6 |
.wait_for_vblank = &avivo_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &r600_hdmi_enable, |
.hdmi_setmode = &r600_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
1136,6 → 1189,8 |
.print_power_state = &rs780_dpm_print_power_state, |
.debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level, |
.force_performance_level = &rs780_dpm_force_performance_level, |
.get_current_sclk = &rs780_dpm_get_current_sclk, |
.get_current_mclk = &rs780_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &rs600_pre_page_flip, |
1146,7 → 1201,7 |
static struct radeon_asic_ring rv770_uvd_ring = { |
.ib_execute = &uvd_v1_0_ib_execute, |
.emit_fence = &uvd_v2_2_fence_emit, |
.emit_semaphore = &uvd_v1_0_semaphore_emit, |
.emit_semaphore = &uvd_v2_2_semaphore_emit, |
.cs_parse = &radeon_uvd_cs_parse, |
.ring_test = &uvd_v1_0_ring_test, |
.ib_test = &uvd_v1_0_ib_test, |
1158,7 → 1213,7 |
static struct radeon_asic rv770_asic = { |
.init = &rv770_init, |
// .fini = &rv770_fini, |
.fini = &rv770_fini, |
// .suspend = &rv770_suspend, |
// .resume = &rv770_resume, |
.asic_reset = &r600_asic_reset, |
1168,8 → 1223,10 |
.mc_wait_for_idle = &r600_mc_wait_for_idle, |
.get_xclk = &rv770_get_xclk, |
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
.get_allowed_info_register = r600_get_allowed_info_register, |
.gart = { |
.tlb_flush = &r600_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.ring = { |
1187,8 → 1244,6 |
.wait_for_vblank = &avivo_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &r600_hdmi_enable, |
.hdmi_setmode = &dce3_1_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
1241,6 → 1296,8 |
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, |
.force_performance_level = &rv770_dpm_force_performance_level, |
.vblank_too_short = &rv770_dpm_vblank_too_short, |
.get_current_sclk = &rv770_dpm_get_current_sclk, |
.get_current_mclk = &rv770_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &rs600_pre_page_flip, |
1276,7 → 1333,7 |
static struct radeon_asic evergreen_asic = { |
.init = &evergreen_init, |
// .fini = &evergreen_fini, |
.fini = &evergreen_fini, |
// .suspend = &evergreen_suspend, |
// .resume = &evergreen_resume, |
.asic_reset = &evergreen_asic_reset, |
1286,8 → 1343,10 |
.mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
.get_xclk = &rv770_get_xclk, |
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
.get_allowed_info_register = evergreen_get_allowed_info_register, |
.gart = { |
.tlb_flush = &evergreen_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.ring = { |
1305,8 → 1364,6 |
.wait_for_vblank = &dce4_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &evergreen_hdmi_enable, |
.hdmi_setmode = &evergreen_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
1359,6 → 1416,8 |
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level, |
.force_performance_level = &rv770_dpm_force_performance_level, |
.vblank_too_short = &cypress_dpm_vblank_too_short, |
.get_current_sclk = &rv770_dpm_get_current_sclk, |
.get_current_mclk = &rv770_dpm_get_current_mclk, |
}, |
.pflip = { |
}, |
1366,7 → 1425,7 |
static struct radeon_asic sumo_asic = { |
.init = &evergreen_init, |
// .fini = &evergreen_fini, |
.fini = &evergreen_fini, |
// .suspend = &evergreen_suspend, |
// .resume = &evergreen_resume, |
.asic_reset = &evergreen_asic_reset, |
1376,8 → 1435,10 |
.mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
.get_xclk = &r600_get_xclk, |
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
.get_allowed_info_register = evergreen_get_allowed_info_register, |
.gart = { |
.tlb_flush = &evergreen_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.ring = { |
1395,8 → 1456,6 |
.wait_for_vblank = &dce4_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &evergreen_hdmi_enable, |
.hdmi_setmode = &evergreen_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
1448,6 → 1507,8 |
.print_power_state = &sumo_dpm_print_power_state, |
.debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level, |
.force_performance_level = &sumo_dpm_force_performance_level, |
.get_current_sclk = &sumo_dpm_get_current_sclk, |
.get_current_mclk = &sumo_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &evergreen_pre_page_flip, |
1457,7 → 1518,7 |
static struct radeon_asic btc_asic = { |
.init = &evergreen_init, |
// .fini = &evergreen_fini, |
.fini = &evergreen_fini, |
// .suspend = &evergreen_suspend, |
// .resume = &evergreen_resume, |
.asic_reset = &evergreen_asic_reset, |
1467,8 → 1528,10 |
.mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
.get_xclk = &rv770_get_xclk, |
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
.get_allowed_info_register = evergreen_get_allowed_info_register, |
.gart = { |
.tlb_flush = &evergreen_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.ring = { |
1486,8 → 1549,6 |
.wait_for_vblank = &dce4_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &evergreen_hdmi_enable, |
.hdmi_setmode = &evergreen_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
1540,6 → 1601,8 |
.debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level, |
.force_performance_level = &rv770_dpm_force_performance_level, |
.vblank_too_short = &btc_dpm_vblank_too_short, |
.get_current_sclk = &btc_dpm_get_current_sclk, |
.get_current_mclk = &btc_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &evergreen_pre_page_flip, |
1592,7 → 1655,7 |
static struct radeon_asic cayman_asic = { |
.init = &cayman_init, |
// .fini = &cayman_fini, |
.fini = &cayman_fini, |
// .suspend = &cayman_suspend, |
// .resume = &cayman_resume, |
.asic_reset = &cayman_asic_reset, |
1602,8 → 1665,10 |
.mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
.get_xclk = &rv770_get_xclk, |
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
.get_allowed_info_register = cayman_get_allowed_info_register, |
.gart = { |
.tlb_flush = &cayman_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.vm = { |
1632,8 → 1697,6 |
.wait_for_vblank = &dce4_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &evergreen_hdmi_enable, |
.hdmi_setmode = &evergreen_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
1686,6 → 1749,8 |
.debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level, |
.force_performance_level = &ni_dpm_force_performance_level, |
.vblank_too_short = &ni_dpm_vblank_too_short, |
.get_current_sclk = &ni_dpm_get_current_sclk, |
.get_current_mclk = &ni_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &evergreen_pre_page_flip, |
1693,9 → 1758,22 |
}, |
}; |
static struct radeon_asic_ring trinity_vce_ring = { |
.ib_execute = &radeon_vce_ib_execute, |
.emit_fence = &radeon_vce_fence_emit, |
.emit_semaphore = &radeon_vce_semaphore_emit, |
.cs_parse = &radeon_vce_cs_parse, |
.ring_test = &radeon_vce_ring_test, |
.ib_test = &radeon_vce_ib_test, |
.is_lockup = &radeon_ring_test_lockup, |
.get_rptr = &vce_v1_0_get_rptr, |
.get_wptr = &vce_v1_0_get_wptr, |
.set_wptr = &vce_v1_0_set_wptr, |
}; |
static struct radeon_asic trinity_asic = { |
.init = &cayman_init, |
// .fini = &cayman_fini, |
.fini = &cayman_fini, |
// .suspend = &cayman_suspend, |
// .resume = &cayman_resume, |
.asic_reset = &cayman_asic_reset, |
1705,8 → 1783,10 |
.mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
.get_xclk = &r600_get_xclk, |
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, |
.get_allowed_info_register = cayman_get_allowed_info_register, |
.gart = { |
.tlb_flush = &cayman_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.vm = { |
1724,6 → 1804,8 |
[R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring, |
[CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring, |
[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, |
[TN_RING_TYPE_VCE1_INDEX] = &trinity_vce_ring, |
[TN_RING_TYPE_VCE2_INDEX] = &trinity_vce_ring, |
}, |
.irq = { |
.set = &evergreen_irq_set, |
1735,8 → 1817,6 |
.wait_for_vblank = &dce4_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &evergreen_hdmi_enable, |
.hdmi_setmode = &evergreen_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
1770,6 → 1850,7 |
.set_pcie_lanes = NULL, |
.set_clock_gating = NULL, |
.set_uvd_clocks = &sumo_set_uvd_clocks, |
.set_vce_clocks = &tn_set_vce_clocks, |
.get_temperature = &tn_get_temp, |
}, |
.dpm = { |
1789,6 → 1870,8 |
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level, |
.force_performance_level = &trinity_dpm_force_performance_level, |
.enable_bapm = &trinity_dpm_enable_bapm, |
.get_current_sclk = &trinity_dpm_get_current_sclk, |
.get_current_mclk = &trinity_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &evergreen_pre_page_flip, |
1828,9 → 1911,9 |
static struct radeon_asic si_asic = { |
.init = &si_init, |
// .fini = &si_fini, |
.fini = &si_fini, |
// .suspend = &si_suspend, |
// .resume = &si_resume, |
.resume = &si_resume, |
.asic_reset = &si_asic_reset, |
// .vga_set_state = &r600_vga_set_state, |
.mmio_hdp_flush = r600_mmio_hdp_flush, |
1838,8 → 1921,10 |
.mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
.get_xclk = &si_get_xclk, |
.get_gpu_clock_counter = &si_get_gpu_clock_counter, |
.get_allowed_info_register = si_get_allowed_info_register, |
.gart = { |
.tlb_flush = &si_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.vm = { |
1857,6 → 1942,8 |
[R600_RING_TYPE_DMA_INDEX] = &si_dma_ring, |
[CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring, |
[R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring, |
[TN_RING_TYPE_VCE1_INDEX] = &trinity_vce_ring, |
[TN_RING_TYPE_VCE2_INDEX] = &trinity_vce_ring, |
}, |
.irq = { |
.set = &si_irq_set, |
1868,8 → 1955,6 |
.wait_for_vblank = &dce4_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &evergreen_hdmi_enable, |
.hdmi_setmode = &evergreen_hdmi_setmode, |
}, |
.copy = { |
.blit = &r600_copy_cpdma, |
1903,6 → 1988,7 |
.set_pcie_lanes = &r600_set_pcie_lanes, |
.set_clock_gating = NULL, |
.set_uvd_clocks = &si_set_uvd_clocks, |
.set_vce_clocks = &si_set_vce_clocks, |
.get_temperature = &si_get_temp, |
}, |
.dpm = { |
1922,6 → 2008,12 |
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, |
.force_performance_level = &si_dpm_force_performance_level, |
.vblank_too_short = &ni_dpm_vblank_too_short, |
.fan_ctrl_set_mode = &si_fan_ctrl_set_mode, |
.fan_ctrl_get_mode = &si_fan_ctrl_get_mode, |
.get_fan_speed_percent = &si_fan_ctrl_get_fan_speed_percent, |
.set_fan_speed_percent = &si_fan_ctrl_set_fan_speed_percent, |
.get_current_sclk = &si_dpm_get_current_sclk, |
.get_current_mclk = &si_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &evergreen_pre_page_flip, |
1989,9 → 2081,9 |
static struct radeon_asic ci_asic = { |
.init = &cik_init, |
// .fini = &si_fini, |
// .suspend = &si_suspend, |
// .resume = &si_resume, |
.fini = &cik_fini, |
// .suspend = &cik_suspend, |
// .resume = &cik_resume, |
.asic_reset = &cik_asic_reset, |
// .vga_set_state = &r600_vga_set_state, |
.mmio_hdp_flush = &r600_mmio_hdp_flush, |
1999,8 → 2091,10 |
.mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
.get_xclk = &cik_get_xclk, |
.get_gpu_clock_counter = &cik_get_gpu_clock_counter, |
.get_allowed_info_register = cik_get_allowed_info_register, |
.gart = { |
.tlb_flush = &cik_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.vm = { |
2031,8 → 2125,6 |
.wait_for_vblank = &dce4_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &evergreen_hdmi_enable, |
.hdmi_setmode = &evergreen_hdmi_setmode, |
}, |
.copy = { |
.blit = &cik_copy_cpdma, |
2087,6 → 2179,12 |
.force_performance_level = &ci_dpm_force_performance_level, |
.vblank_too_short = &ci_dpm_vblank_too_short, |
.powergate_uvd = &ci_dpm_powergate_uvd, |
.fan_ctrl_set_mode = &ci_fan_ctrl_set_mode, |
.fan_ctrl_get_mode = &ci_fan_ctrl_get_mode, |
.get_fan_speed_percent = &ci_fan_ctrl_get_fan_speed_percent, |
.set_fan_speed_percent = &ci_fan_ctrl_set_fan_speed_percent, |
.get_current_sclk = &ci_dpm_get_current_sclk, |
.get_current_mclk = &ci_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &evergreen_pre_page_flip, |
2096,9 → 2194,9 |
static struct radeon_asic kv_asic = { |
.init = &cik_init, |
// .fini = &si_fini, |
// .suspend = &si_suspend, |
// .resume = &si_resume, |
.fini = &cik_fini, |
// .suspend = &cik_suspend, |
// .resume = &cik_resume, |
.asic_reset = &cik_asic_reset, |
// .vga_set_state = &r600_vga_set_state, |
.mmio_hdp_flush = &r600_mmio_hdp_flush, |
2106,8 → 2204,10 |
.mc_wait_for_idle = &evergreen_mc_wait_for_idle, |
.get_xclk = &cik_get_xclk, |
.get_gpu_clock_counter = &cik_get_gpu_clock_counter, |
.get_allowed_info_register = cik_get_allowed_info_register, |
.gart = { |
.tlb_flush = &cik_pcie_gart_tlb_flush, |
.get_page_entry = &rs600_gart_get_page_entry, |
.set_page = &rs600_gart_set_page, |
}, |
.vm = { |
2138,8 → 2238,6 |
.wait_for_vblank = &dce4_wait_for_vblank, |
.set_backlight_level = &atombios_set_backlight_level, |
.get_backlight_level = &atombios_get_backlight_level, |
.hdmi_enable = &evergreen_hdmi_enable, |
.hdmi_setmode = &evergreen_hdmi_setmode, |
}, |
.copy = { |
.blit = &cik_copy_cpdma, |
2194,6 → 2292,8 |
.force_performance_level = &kv_dpm_force_performance_level, |
.powergate_uvd = &kv_dpm_powergate_uvd, |
.enable_bapm = &kv_dpm_enable_bapm, |
.get_current_sclk = &kv_dpm_get_current_sclk, |
.get_current_mclk = &kv_dpm_get_current_mclk, |
}, |
.pflip = { |
// .pre_page_flip = &evergreen_pre_page_flip, |
2352,6 → 2452,8 |
/* set num crtcs */ |
rdev->num_crtc = 4; |
rdev->has_uvd = true; |
rdev->cg_flags = |
RADEON_CG_SUPPORT_VCE_MGCG; |
break; |
case CHIP_TAHITI: |
case CHIP_PITCAIRN: |
/drivers/video/drm/radeon/radeon_asic.h |
---|
67,8 → 67,9 |
int r100_asic_reset(struct radeon_device *rdev); |
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags); |
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags); |
uint64_t entry); |
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); |
int r100_irq_set(struct radeon_device *rdev); |
int r100_irq_process(struct radeon_device *rdev); |
172,8 → 173,9 |
struct radeon_fence *fence); |
extern int r300_cs_parse(struct radeon_cs_parser *p); |
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags); |
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags); |
uint64_t entry); |
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); |
extern int rv370_get_pcie_lanes(struct radeon_device *rdev); |
extern void r300_set_reg_safe(struct radeon_device *rdev); |
208,8 → 210,9 |
extern int rs400_suspend(struct radeon_device *rdev); |
extern int rs400_resume(struct radeon_device *rdev); |
void rs400_gart_tlb_flush(struct radeon_device *rdev); |
uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags); |
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags); |
uint64_t entry); |
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
int rs400_gart_init(struct radeon_device *rdev); |
232,8 → 235,9 |
void rs600_irq_disable(struct radeon_device *rdev); |
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
void rs600_gart_tlb_flush(struct radeon_device *rdev); |
uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags); |
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags); |
uint64_t entry); |
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
void rs600_bandwidth_update(struct radeon_device *rdev); |
380,6 → 384,8 |
struct radeon_ring *ring); |
void r600_gfx_set_wptr(struct radeon_device *rdev, |
struct radeon_ring *ring); |
int r600_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val); |
/* r600 irq */ |
int r600_irq_process(struct radeon_device *rdev); |
int r600_irq_init(struct radeon_device *rdev); |
390,7 → 396,6 |
void r600_disable_interrupts(struct radeon_device *rdev); |
void r600_rlc_stop(struct radeon_device *rdev); |
/* r600 audio */ |
int r600_audio_init(struct radeon_device *rdev); |
void r600_audio_fini(struct radeon_device *rdev); |
void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock); |
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer, |
399,8 → 404,6 |
void r600_hdmi_audio_workaround(struct drm_encoder *encoder); |
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); |
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); |
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); |
void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
int r600_mc_wait_for_idle(struct radeon_device *rdev); |
u32 r600_get_xclk(struct radeon_device *rdev); |
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev); |
432,6 → 435,8 |
struct seq_file *m); |
int rv6xx_dpm_force_performance_level(struct radeon_device *rdev, |
enum radeon_dpm_forced_level level); |
u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev); |
/* rs780 dpm */ |
int rs780_dpm_init(struct radeon_device *rdev); |
int rs780_dpm_enable(struct radeon_device *rdev); |
448,6 → 453,8 |
struct seq_file *m); |
int rs780_dpm_force_performance_level(struct radeon_device *rdev, |
enum radeon_dpm_forced_level level); |
u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev); |
/* |
* rv770,rv730,rv710,rv740 |
469,8 → 476,6 |
u32 rv770_get_xclk(struct radeon_device *rdev); |
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); |
int rv770_get_temp(struct radeon_device *rdev); |
/* hdmi */ |
void dce3_1_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
/* rv7xx pm */ |
int rv770_dpm_init(struct radeon_device *rdev); |
int rv770_dpm_enable(struct radeon_device *rdev); |
489,6 → 494,8 |
int rv770_dpm_force_performance_level(struct radeon_device *rdev, |
enum radeon_dpm_forced_level level); |
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev); |
u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev); |
/* |
* evergreen |
540,9 → 547,9 |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct reservation_object *resv); |
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); |
void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
int evergreen_get_temp(struct radeon_device *rdev); |
int evergreen_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val); |
int sumo_get_temp(struct radeon_device *rdev); |
int tn_get_temp(struct radeon_device *rdev); |
int cypress_dpm_init(struct radeon_device *rdev); |
566,6 → 573,8 |
bool btc_dpm_vblank_too_short(struct radeon_device *rdev); |
void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev, |
struct seq_file *m); |
u32 btc_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 btc_dpm_get_current_mclk(struct radeon_device *rdev); |
int sumo_dpm_init(struct radeon_device *rdev); |
int sumo_dpm_enable(struct radeon_device *rdev); |
int sumo_dpm_late_enable(struct radeon_device *rdev); |
584,6 → 593,8 |
struct seq_file *m); |
int sumo_dpm_force_performance_level(struct radeon_device *rdev, |
enum radeon_dpm_forced_level level); |
u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev); |
/* |
* cayman |
640,6 → 651,8 |
struct radeon_ring *ring); |
void cayman_dma_set_wptr(struct radeon_device *rdev, |
struct radeon_ring *ring); |
int cayman_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val); |
int ni_dpm_init(struct radeon_device *rdev); |
void ni_dpm_setup_asic(struct radeon_device *rdev); |
658,6 → 671,8 |
int ni_dpm_force_performance_level(struct radeon_device *rdev, |
enum radeon_dpm_forced_level level); |
bool ni_dpm_vblank_too_short(struct radeon_device *rdev); |
u32 ni_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 ni_dpm_get_current_mclk(struct radeon_device *rdev); |
int trinity_dpm_init(struct radeon_device *rdev); |
int trinity_dpm_enable(struct radeon_device *rdev); |
int trinity_dpm_late_enable(struct radeon_device *rdev); |
677,10 → 692,12 |
int trinity_dpm_force_performance_level(struct radeon_device *rdev, |
enum radeon_dpm_forced_level level); |
void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable); |
u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev); |
int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk); |
/* DCE6 - SI */ |
void dce6_bandwidth_update(struct radeon_device *rdev); |
int dce6_audio_init(struct radeon_device *rdev); |
void dce6_audio_fini(struct radeon_device *rdev); |
/* |
729,7 → 746,10 |
u32 si_get_xclk(struct radeon_device *rdev); |
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); |
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); |
int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk); |
int si_get_temp(struct radeon_device *rdev); |
int si_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val); |
int si_dpm_init(struct radeon_device *rdev); |
void si_dpm_setup_asic(struct radeon_device *rdev); |
int si_dpm_enable(struct radeon_device *rdev); |
744,6 → 764,14 |
struct seq_file *m); |
int si_dpm_force_performance_level(struct radeon_device *rdev, |
enum radeon_dpm_forced_level level); |
int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, |
u32 *speed); |
int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, |
u32 speed); |
u32 si_fan_ctrl_get_mode(struct radeon_device *rdev); |
void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode); |
u32 si_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 si_dpm_get_current_mclk(struct radeon_device *rdev); |
/* DCE8 - CIK */ |
void dce8_bandwidth_update(struct radeon_device *rdev); |
839,6 → 867,8 |
struct radeon_ring *ring); |
int ci_get_temp(struct radeon_device *rdev); |
int kv_get_temp(struct radeon_device *rdev); |
int cik_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val); |
int ci_dpm_init(struct radeon_device *rdev); |
int ci_dpm_enable(struct radeon_device *rdev); |
860,7 → 890,16 |
enum radeon_dpm_forced_level level); |
bool ci_dpm_vblank_too_short(struct radeon_device *rdev); |
void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); |
u32 ci_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 ci_dpm_get_current_mclk(struct radeon_device *rdev); |
int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, |
u32 *speed); |
int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, |
u32 speed); |
u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev); |
void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode); |
int kv_dpm_init(struct radeon_device *rdev); |
int kv_dpm_enable(struct radeon_device *rdev); |
int kv_dpm_late_enable(struct radeon_device *rdev); |
881,6 → 920,8 |
enum radeon_dpm_forced_level level); |
void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate); |
void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable); |
u32 kv_dpm_get_current_sclk(struct radeon_device *rdev); |
u32 kv_dpm_get_current_mclk(struct radeon_device *rdev); |
/* uvd v1.0 */ |
uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev, |
910,6 → 951,10 |
int uvd_v2_2_resume(struct radeon_device *rdev); |
void uvd_v2_2_fence_emit(struct radeon_device *rdev, |
struct radeon_fence *fence); |
bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, |
struct radeon_ring *ring, |
struct radeon_semaphore *semaphore, |
bool emit_wait); |
/* uvd v3.1 */ |
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, |
927,10 → 972,14 |
struct radeon_ring *ring); |
void vce_v1_0_set_wptr(struct radeon_device *rdev, |
struct radeon_ring *ring); |
int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data); |
unsigned vce_v1_0_bo_size(struct radeon_device *rdev); |
int vce_v1_0_resume(struct radeon_device *rdev); |
int vce_v1_0_init(struct radeon_device *rdev); |
int vce_v1_0_start(struct radeon_device *rdev); |
/* vce v2.0 */ |
unsigned vce_v2_0_bo_size(struct radeon_device *rdev); |
int vce_v2_0_resume(struct radeon_device *rdev); |
#endif |
/drivers/video/drm/radeon/radeon_atombios.c |
---|
845,6 → 845,7 |
radeon_link_encoder_connector(dev); |
radeon_setup_mst_connector(dev); |
return true; |
} |
3289,6 → 3290,7 |
args.in.ucVoltageType = VOLTAGE_TYPE_VDDC; |
args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; |
args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id); |
args.in.ulSCLKFreq = |
cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk); |
/drivers/video/drm/radeon/radeon_benchmark.c |
---|
34,7 → 34,8 |
static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, |
uint64_t saddr, uint64_t daddr, |
int flag, int n) |
int flag, int n, |
struct reservation_object *resv) |
{ |
unsigned long start_jiffies; |
unsigned long end_jiffies; |
47,12 → 48,12 |
case RADEON_BENCHMARK_COPY_DMA: |
fence = radeon_copy_dma(rdev, saddr, daddr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
resv); |
break; |
case RADEON_BENCHMARK_COPY_BLIT: |
fence = radeon_copy_blit(rdev, saddr, daddr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
resv); |
break; |
default: |
DRM_ERROR("Unknown copy method\n"); |
92,9 → 93,6 |
int r, n; |
int time; |
ENTER(); |
n = RADEON_BENCHMARK_ITERATIONS; |
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj); |
if (r) { |
120,11 → 118,11 |
if (r) { |
goto out_cleanup; |
} |
dbgprintf("done\n"); |
if (rdev->asic->copy.dma) { |
time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
RADEON_BENCHMARK_COPY_DMA, n); |
RADEON_BENCHMARK_COPY_DMA, n, |
dobj->tbo.resv); |
if (time < 0) |
goto out_cleanup; |
if (time > 0) |
134,7 → 132,8 |
if (rdev->asic->copy.blit) { |
time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
RADEON_BENCHMARK_COPY_BLIT, n); |
RADEON_BENCHMARK_COPY_BLIT, n, |
dobj->tbo.resv); |
if (time < 0) |
goto out_cleanup; |
if (time > 0) |
163,9 → 162,6 |
if (r) { |
DRM_ERROR("Error while benchmarking BO move.\n"); |
} |
LEAVE(); |
} |
void radeon_benchmark(struct radeon_device *rdev, int test_number) |
/drivers/video/drm/radeon/radeon_bios.c |
---|
30,7 → 30,6 |
#include "radeon.h" |
#include "atom.h" |
//#include <linux/vga_switcheroo.h> |
#include <linux/slab.h> |
/* |
* BIOS. |
75,7 → 74,7 |
static bool radeon_read_bios(struct radeon_device *rdev) |
{ |
uint8_t __iomem *bios; |
uint8_t __iomem *bios, val1, val2; |
size_t size; |
rdev->bios = NULL; |
/drivers/video/drm/radeon/radeon_combios.c |
---|
1255,10 → 1255,15 |
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && |
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { |
u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; |
if (hss > lvds->native_mode.hdisplay) |
hss = (10 - 1) * 8; |
lvds->native_mode.htotal = lvds->native_mode.hdisplay + |
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; |
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + |
(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; |
hss; |
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + |
(RBIOS8(tmp + 23) * 8); |
3382,6 → 3387,14 |
rdev->pdev->subsystem_device == 0x30ae) |
return; |
/* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume |
* - it hangs on resume inside the dynclk 1 table. |
*/ |
if (rdev->family == CHIP_RS480 && |
rdev->pdev->subsystem_vendor == 0x103c && |
rdev->pdev->subsystem_device == 0x280a) |
return; |
/* DYN CLK 1 */ |
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); |
if (table) |
/drivers/video/drm/radeon/radeon_connectors.c |
---|
27,12 → 27,24 |
#include <drm/drm_edid.h> |
#include <drm/drm_crtc_helper.h> |
#include <drm/drm_fb_helper.h> |
#include <drm/drm_dp_mst_helper.h> |
#include <drm/radeon_drm.h> |
#include "radeon.h" |
#include "radeon_audio.h" |
#include "atom.h" |
#include <linux/pm_runtime.h> |
static int radeon_dp_handle_hpd(struct drm_connector *connector) |
{ |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
int ret; |
ret = radeon_dp_mst_check_status(radeon_connector); |
if (ret == -EINVAL) |
return 1; |
return 0; |
} |
void radeon_connector_hotplug(struct drm_connector *connector) |
{ |
struct drm_device *dev = connector->dev; |
39,6 → 51,17 |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { |
struct radeon_connector_atom_dig *dig_connector = |
radeon_connector->con_priv; |
if (radeon_connector->is_mst_connector) |
return; |
if (dig_connector->is_mst) { |
radeon_dp_handle_hpd(connector); |
return; |
} |
} |
/* bail if the connector does not have hpd pin, e.g., |
* VGA, TV, etc. |
*/ |
72,6 → 95,11 |
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { |
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
} else if (radeon_dp_needs_link_train(radeon_connector)) { |
/* Don't try to start link training before we |
* have the dpcd */ |
if (!radeon_dp_getdpcd(radeon_connector)) |
return; |
/* set it to OFF so that drm_helper_connector_dpms() |
* won't return immediately since the current state |
* is ON at this point. |
134,7 → 162,7 |
if (connector->display_info.bpc) |
bpc = connector->display_info.bpc; |
else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { |
struct drm_connector_helper_funcs *connector_funcs = |
const struct drm_connector_helper_funcs *connector_funcs = |
connector->helper_private; |
struct drm_encoder *encoder = connector_funcs->best_encoder(connector); |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
224,7 → 252,7 |
struct radeon_device *rdev = dev->dev_private; |
struct drm_encoder *best_encoder = NULL; |
struct drm_encoder *encoder = NULL; |
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; |
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; |
bool connected; |
int i; |
701,7 → 729,7 |
if (connector->encoder) |
radeon_encoder = to_radeon_encoder(connector->encoder); |
else { |
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; |
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; |
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector)); |
} |
724,9 → 752,33 |
radeon_property_change_mode(&radeon_encoder->base); |
} |
if (property == rdev->mode_info.output_csc_property) { |
if (connector->encoder) |
radeon_encoder = to_radeon_encoder(connector->encoder); |
else { |
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; |
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector)); |
} |
if (radeon_encoder->output_csc == val) |
return 0; |
radeon_encoder->output_csc = val; |
if (connector->encoder->crtc) { |
struct drm_crtc *crtc = connector->encoder->crtc; |
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
radeon_crtc->output_csc = radeon_encoder->output_csc; |
(*crtc_funcs->load_lut)(crtc); |
} |
} |
return 0; |
} |
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, |
struct drm_connector *connector) |
{ |
845,7 → 897,11 |
/* check if panel is valid */ |
if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) |
ret = connector_status_connected; |
/* don't fetch the edid from the vbios if ddc fails and runpm is |
* enabled so we report disconnected. |
*/ |
if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) |
ret = connector_status_disconnected; |
} |
/* check for edid as well */ |
884,7 → 940,7 |
if (connector->encoder) |
radeon_encoder = to_radeon_encoder(connector->encoder); |
else { |
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; |
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; |
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector)); |
} |
952,7 → 1008,7 |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
struct drm_encoder *encoder; |
struct drm_encoder_helper_funcs *encoder_funcs; |
const struct drm_encoder_helper_funcs *encoder_funcs; |
bool dret = false; |
enum drm_connector_status ret = connector_status_disconnected; |
1074,7 → 1130,7 |
radeon_tv_detect(struct drm_connector *connector, bool force) |
{ |
struct drm_encoder *encoder; |
struct drm_encoder_helper_funcs *encoder_funcs; |
const struct drm_encoder_helper_funcs *encoder_funcs; |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
enum drm_connector_status ret = connector_status_disconnected; |
int r; |
1148,7 → 1204,7 |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
struct drm_encoder *encoder = NULL; |
struct drm_encoder_helper_funcs *encoder_funcs; |
const struct drm_encoder_helper_funcs *encoder_funcs; |
int i, r; |
enum drm_connector_status ret = connector_status_disconnected; |
bool dret = false, broken_edid = false; |
1159,8 → 1215,9 |
goto exit; |
} |
if (radeon_connector->ddc_bus) |
if (radeon_connector->ddc_bus) { |
dret = radeon_ddc_probe(radeon_connector, false); |
} |
if (dret) { |
radeon_connector->detected_by_load = false; |
radeon_connector_free_edid(connector); |
1304,6 → 1361,17 |
/* updated in get modes as well since we need to know if it's analog or digital */ |
radeon_connector_update_scratch_regs(connector, ret); |
if ((radeon_audio != 0) && radeon_connector->use_digital) { |
const struct drm_connector_helper_funcs *connector_funcs = |
connector->helper_private; |
encoder = connector_funcs->best_encoder(connector); |
if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) { |
radeon_connector_get_edid(connector); |
radeon_audio_detect(connector, encoder, ret); |
} |
} |
exit: |
return ret; |
} |
1550,6 → 1618,8 |
struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
int r; |
if (radeon_dig_connector->is_mst) |
return connector_status_disconnected; |
if (!force && radeon_check_hpd_status_unchanged(connector)) { |
ret = connector->status; |
1567,6 → 1637,11 |
/* check if panel is valid */ |
if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) |
ret = connector_status_connected; |
/* don't fetch the edid from the vbios if ddc fails and runpm is |
* enabled so we report disconnected. |
*/ |
if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) |
ret = connector_status_disconnected; |
} |
/* eDP is always DP */ |
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
1592,7 → 1667,7 |
if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */ |
ret = connector_status_connected; |
else if (radeon_connector->dac_load_detect) { /* try load detection */ |
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
ret = encoder_funcs->detect(encoder, connector); |
} |
} |
1600,12 → 1675,21 |
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); |
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { |
ret = connector_status_connected; |
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) |
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { |
radeon_dp_getdpcd(radeon_connector); |
r = radeon_dp_mst_probe(radeon_connector); |
if (r == 1) |
ret = connector_status_disconnected; |
} |
} else { |
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { |
if (radeon_dp_getdpcd(radeon_connector)) |
if (radeon_dp_getdpcd(radeon_connector)) { |
r = radeon_dp_mst_probe(radeon_connector); |
if (r == 1) |
ret = connector_status_disconnected; |
else |
ret = connector_status_connected; |
} |
} else { |
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */ |
if (radeon_ddc_probe(radeon_connector, false)) |
1615,6 → 1699,12 |
} |
radeon_connector_update_scratch_regs(connector, ret); |
if ((radeon_audio != 0) && encoder) { |
radeon_connector_get_edid(connector); |
radeon_audio_detect(connector, encoder, ret); |
} |
out: |
return ret; |
} |
1822,6 → 1912,10 |
drm_object_attach_property(&radeon_connector->base.base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_NONE); |
if (ASIC_IS_DCE5(rdev)) |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.output_csc_property, |
RADEON_OUTPUT_CSC_BYPASS); |
break; |
case DRM_MODE_CONNECTOR_DVII: |
case DRM_MODE_CONNECTOR_DVID: |
1854,6 → 1948,10 |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.audio_property, |
RADEON_AUDIO_AUTO); |
if (ASIC_IS_DCE5(rdev)) |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.output_csc_property, |
RADEON_OUTPUT_CSC_BYPASS); |
subpixel_order = SubPixelHorizontalRGB; |
connector->interlace_allowed = true; |
1900,6 → 1998,10 |
drm_object_attach_property(&radeon_connector->base.base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_NONE); |
if (ASIC_IS_DCE5(rdev)) |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.output_csc_property, |
RADEON_OUTPUT_CSC_BYPASS); |
/* no HPD on analog connectors */ |
radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1922,6 → 2024,10 |
drm_object_attach_property(&radeon_connector->base.base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_NONE); |
if (ASIC_IS_DCE5(rdev)) |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.output_csc_property, |
RADEON_OUTPUT_CSC_BYPASS); |
/* no HPD on analog connectors */ |
radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
connector->interlace_allowed = true; |
1973,6 → 2079,10 |
rdev->mode_info.load_detect_property, |
1); |
} |
if (ASIC_IS_DCE5(rdev)) |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.output_csc_property, |
RADEON_OUTPUT_CSC_BYPASS); |
connector->interlace_allowed = true; |
if (connector_type == DRM_MODE_CONNECTOR_DVII) |
connector->doublescan_allowed = true; |
2018,6 → 2128,10 |
rdev->mode_info.audio_property, |
RADEON_AUDIO_AUTO); |
} |
if (ASIC_IS_DCE5(rdev)) |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.output_csc_property, |
RADEON_OUTPUT_CSC_BYPASS); |
subpixel_order = SubPixelHorizontalRGB; |
connector->interlace_allowed = true; |
if (connector_type == DRM_MODE_CONNECTOR_HDMIB) |
2066,6 → 2180,10 |
rdev->mode_info.audio_property, |
RADEON_AUDIO_AUTO); |
} |
if (ASIC_IS_DCE5(rdev)) |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.output_csc_property, |
RADEON_OUTPUT_CSC_BYPASS); |
connector->interlace_allowed = true; |
/* in theory with a DP to VGA converter... */ |
connector->doublescan_allowed = false; |
2302,3 → 2420,27 |
connector->display_info.subpixel_order = subpixel_order; |
drm_connector_register(connector); |
} |
void radeon_setup_mst_connector(struct drm_device *dev) |
{ |
struct radeon_device *rdev = dev->dev_private; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector; |
if (!ASIC_IS_DCE5(rdev)) |
return; |
if (radeon_mst == 0) |
return; |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
int ret; |
radeon_connector = to_radeon_connector(connector); |
if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) |
continue; |
ret = radeon_dp_mst_init(radeon_connector); |
} |
} |
/drivers/video/drm/radeon/radeon_cs.c |
---|
258,11 → 258,13 |
u32 ring = RADEON_CS_RING_GFX; |
s32 priority = 0; |
INIT_LIST_HEAD(&p->validated); |
if (!cs->num_chunks) { |
return 0; |
} |
/* get chunks */ |
INIT_LIST_HEAD(&p->validated); |
p->idx = 0; |
p->ib.sa_bo = NULL; |
p->const_ib.sa_bo = NULL; |
710,6 → 712,7 |
struct radeon_cs_chunk *ib_chunk = p->chunk_ib; |
struct radeon_device *rdev = p->rdev; |
uint32_t header; |
int ret = 0, i; |
if (idx >= ib_chunk->length_dw) { |
DRM_ERROR("Can not parse packet at %d after CS end %d !\n", |
738,15 → 741,26 |
break; |
default: |
DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); |
return -EINVAL; |
ret = -EINVAL; |
goto dump_ib; |
} |
if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { |
DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", |
pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); |
return -EINVAL; |
ret = -EINVAL; |
goto dump_ib; |
} |
return 0; |
dump_ib: |
for (i = 0; i < ib_chunk->length_dw; i++) { |
if (i == idx) |
printk("\t0x%08x <---\n", radeon_get_ib_value(p, i)); |
else |
printk("\t0x%08x\n", radeon_get_ib_value(p, i)); |
} |
return ret; |
} |
/** |
* radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP |
/drivers/video/drm/radeon/radeon_cursor.c |
---|
91,15 → 91,34 |
struct radeon_device *rdev = crtc->dev->dev_private; |
if (ASIC_IS_DCE4(rdev)) { |
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, |
upper_32_bits(radeon_crtc->cursor_addr)); |
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
lower_32_bits(radeon_crtc->cursor_addr)); |
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); |
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | |
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); |
} else if (ASIC_IS_AVIVO(rdev)) { |
if (rdev->family >= CHIP_RV770) { |
if (radeon_crtc->crtc_id) |
WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, |
upper_32_bits(radeon_crtc->cursor_addr)); |
else |
WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, |
upper_32_bits(radeon_crtc->cursor_addr)); |
} |
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
lower_32_bits(radeon_crtc->cursor_addr)); |
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); |
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | |
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); |
} else { |
/* offset is from DISP(2)_BASE_ADDRESS */ |
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, |
radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr); |
switch (radeon_crtc->crtc_id) { |
case 0: |
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); |
117,109 → 136,10 |
} |
} |
static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, |
uint64_t gpu_addr) |
static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y) |
{ |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
struct radeon_device *rdev = crtc->dev->dev_private; |
if (ASIC_IS_DCE4(rdev)) { |
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, |
upper_32_bits(gpu_addr)); |
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
gpu_addr & 0xffffffff); |
} else if (ASIC_IS_AVIVO(rdev)) { |
if (rdev->family >= CHIP_RV770) { |
if (radeon_crtc->crtc_id) |
WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); |
else |
WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr)); |
} |
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, |
gpu_addr & 0xffffffff); |
} else { |
radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr; |
/* offset is from DISP(2)_BASE_ADDRESS */ |
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); |
} |
} |
int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
struct drm_file *file_priv, |
uint32_t handle, |
uint32_t width, |
uint32_t height) |
{ |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
struct radeon_device *rdev = crtc->dev->dev_private; |
struct drm_gem_object *obj; |
struct radeon_bo *robj; |
uint64_t gpu_addr; |
int ret; |
if (!handle) { |
/* turn off cursor */ |
radeon_hide_cursor(crtc); |
obj = NULL; |
goto unpin; |
} |
if ((width > radeon_crtc->max_cursor_width) || |
(height > radeon_crtc->max_cursor_height)) { |
DRM_ERROR("bad cursor width or height %d x %d\n", width, height); |
return -EINVAL; |
} |
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); |
if (!obj) { |
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); |
return -ENOENT; |
} |
robj = gem_to_radeon_bo(obj); |
ret = radeon_bo_reserve(robj, false); |
if (unlikely(ret != 0)) |
goto fail; |
/* Only 27 bit offset for legacy cursor */ |
ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, |
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, |
&gpu_addr); |
radeon_bo_unreserve(robj); |
if (ret) |
goto fail; |
radeon_crtc->cursor_width = width; |
radeon_crtc->cursor_height = height; |
radeon_lock_cursor(crtc, true); |
radeon_set_cursor(crtc, obj, gpu_addr); |
radeon_show_cursor(crtc); |
radeon_lock_cursor(crtc, false); |
unpin: |
if (radeon_crtc->cursor_bo) { |
robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); |
ret = radeon_bo_reserve(robj, false); |
if (likely(ret == 0)) { |
radeon_bo_unpin(robj); |
radeon_bo_unreserve(robj); |
} |
drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); |
} |
radeon_crtc->cursor_bo = obj; |
return 0; |
fail: |
drm_gem_object_unreference_unlocked(obj); |
return ret; |
} |
int radeon_crtc_cursor_move(struct drm_crtc *crtc, |
int x, int y) |
{ |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
struct radeon_device *rdev = crtc->dev->dev_private; |
int xorigin = 0, yorigin = 0; |
int w = radeon_crtc->cursor_width; |
281,7 → 201,6 |
} |
} |
radeon_lock_cursor(crtc, true); |
if (ASIC_IS_DCE4(rdev)) { |
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); |
WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); |
305,10 → 224,136 |
| (x << 16) |
| y)); |
/* offset is from DISP(2)_BASE_ADDRESS */ |
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + |
(yorigin * 256))); |
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, |
radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr + |
yorigin * 256); |
} |
radeon_crtc->cursor_x = x; |
radeon_crtc->cursor_y = y; |
return 0; |
} |
int radeon_crtc_cursor_move(struct drm_crtc *crtc, |
int x, int y) |
{ |
int ret; |
radeon_lock_cursor(crtc, true); |
ret = radeon_cursor_move_locked(crtc, x, y); |
radeon_lock_cursor(crtc, false); |
return ret; |
} |
int radeon_crtc_cursor_set2(struct drm_crtc *crtc, |
struct drm_file *file_priv, |
uint32_t handle, |
uint32_t width, |
uint32_t height, |
int32_t hot_x, |
int32_t hot_y) |
{ |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
struct radeon_device *rdev = crtc->dev->dev_private; |
struct drm_gem_object *obj; |
struct radeon_bo *robj; |
int ret; |
if (!handle) { |
/* turn off cursor */ |
radeon_hide_cursor(crtc); |
obj = NULL; |
goto unpin; |
} |
if ((width > radeon_crtc->max_cursor_width) || |
(height > radeon_crtc->max_cursor_height)) { |
DRM_ERROR("bad cursor width or height %d x %d\n", width, height); |
return -EINVAL; |
} |
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); |
if (!obj) { |
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id); |
return -ENOENT; |
} |
robj = gem_to_radeon_bo(obj); |
ret = radeon_bo_reserve(robj, false); |
if (ret != 0) { |
drm_gem_object_unreference_unlocked(obj); |
return ret; |
} |
/* Only 27 bit offset for legacy cursor */ |
ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, |
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, |
&radeon_crtc->cursor_addr); |
radeon_bo_unreserve(robj); |
if (ret) { |
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); |
drm_gem_object_unreference_unlocked(obj); |
return ret; |
} |
radeon_crtc->cursor_width = width; |
radeon_crtc->cursor_height = height; |
radeon_lock_cursor(crtc, true); |
if (hot_x != radeon_crtc->cursor_hot_x || |
hot_y != radeon_crtc->cursor_hot_y) { |
int x, y; |
x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x; |
y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y; |
radeon_cursor_move_locked(crtc, x, y); |
radeon_crtc->cursor_hot_x = hot_x; |
radeon_crtc->cursor_hot_y = hot_y; |
} |
radeon_show_cursor(crtc); |
radeon_lock_cursor(crtc, false); |
unpin: |
if (radeon_crtc->cursor_bo) { |
struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo); |
ret = radeon_bo_reserve(robj, false); |
if (likely(ret == 0)) { |
radeon_bo_unpin(robj); |
radeon_bo_unreserve(robj); |
} |
drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); |
} |
radeon_crtc->cursor_bo = obj; |
return 0; |
} |
/** |
* radeon_cursor_reset - Re-set the current cursor, if any. |
* |
* @crtc: drm crtc |
* |
* If the CRTC passed in currently has a cursor assigned, this function |
* makes sure it's visible. |
*/ |
void radeon_cursor_reset(struct drm_crtc *crtc) |
{ |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
if (radeon_crtc->cursor_bo) { |
radeon_lock_cursor(crtc, true); |
radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x, |
radeon_crtc->cursor_y); |
radeon_show_cursor(crtc); |
radeon_lock_cursor(crtc, false); |
} |
} |
/drivers/video/drm/radeon/radeon_device.c |
---|
30,6 → 30,7 |
#include <drm/drmP.h> |
#include <drm/drm_crtc_helper.h> |
#include <drm/radeon_drm.h> |
#include <linux/vgaarb.h> |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "atom.h" |
72,6 → 73,8 |
int irq_override = 0; |
int radeon_bapm = -1; |
int radeon_backlight = 0; |
int radeon_auxch = -1; |
int radeon_mst = 0; |
extern display_t *os_display; |
extern struct drm_device *main_device; |
1124,6 → 1127,22 |
} |
/** |
* Determine a sensible default GART size according to ASIC family. |
* |
* @family ASIC family name |
*/ |
static int radeon_gart_size_auto(enum radeon_family family) |
{ |
/* default to a larger gart size on newer asics */ |
if (family >= CHIP_TAHITI) |
return 2048; |
else if (family >= CHIP_RV770) |
return 1024; |
else |
return 512; |
} |
/** |
* radeon_check_arguments - validate module params |
* |
* @rdev: radeon_device pointer |
1141,27 → 1160,17 |
} |
if (radeon_gart_size == -1) { |
/* default to a larger gart size on newer asics */ |
if (rdev->family >= CHIP_RV770) |
radeon_gart_size = 1024; |
else |
radeon_gart_size = 512; |
radeon_gart_size = radeon_gart_size_auto(rdev->family); |
} |
/* gtt size must be power of two and greater or equal to 32M */ |
if (radeon_gart_size < 32) { |
dev_warn(rdev->dev, "gart size (%d) too small\n", |
radeon_gart_size); |
if (rdev->family >= CHIP_RV770) |
radeon_gart_size = 1024; |
else |
radeon_gart_size = 512; |
radeon_gart_size = radeon_gart_size_auto(rdev->family); |
} else if (!radeon_check_pot_argument(radeon_gart_size)) { |
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", |
radeon_gart_size); |
if (rdev->family >= CHIP_RV770) |
radeon_gart_size = 1024; |
else |
radeon_gart_size = 512; |
radeon_gart_size = radeon_gart_size_auto(rdev->family); |
} |
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; |
1396,7 → 1405,7 |
r = radeon_init(rdev); |
if (r) |
return r; |
goto failed; |
1409,7 → 1418,7 |
radeon_agp_disable(rdev); |
r = radeon_init(rdev); |
if (r) |
return r; |
goto failed; |
} |
// r = radeon_ib_ring_tests(rdev); |
1435,6 → 1444,9 |
DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n"); |
} |
return 0; |
failed: |
return r; |
} |
/** |
1615,6 → 1627,14 |
radeon_PCI_IDS |
}; |
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); |
int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); |
void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); |
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe, |
int *max_error, |
struct timeval *vblank_time, |
unsigned flags); |
void radeon_gem_object_free(struct drm_gem_object *obj); |
void radeon_driver_irq_preinstall_kms(struct drm_device *dev); |
int radeon_driver_irq_postinstall_kms(struct drm_device *dev); |
void radeon_driver_irq_uninstall_kms(struct drm_device *dev); |
1632,11 → 1652,11 |
// .postclose = radeon_driver_postclose_kms, |
// .lastclose = radeon_driver_lastclose_kms, |
// .unload = radeon_driver_unload_kms, |
// .get_vblank_counter = radeon_get_vblank_counter_kms, |
// .enable_vblank = radeon_enable_vblank_kms, |
// .disable_vblank = radeon_disable_vblank_kms, |
// .get_vblank_timestamp = radeon_get_vblank_timestamp_kms, |
// .get_scanout_position = radeon_get_crtc_scanoutpos, |
.get_vblank_counter = radeon_get_vblank_counter_kms, |
.enable_vblank = radeon_enable_vblank_kms, |
.disable_vblank = radeon_disable_vblank_kms, |
.get_vblank_timestamp = radeon_get_vblank_timestamp_kms, |
.get_scanout_position = radeon_get_crtc_scanoutpos, |
#if defined(CONFIG_DEBUG_FS) |
.debugfs_init = radeon_debugfs_init, |
.debugfs_cleanup = radeon_debugfs_cleanup, |
1646,7 → 1666,7 |
.irq_uninstall = radeon_driver_irq_uninstall_kms, |
.irq_handler = radeon_driver_irq_handler_kms, |
// .ioctls = radeon_ioctls_kms, |
// .gem_free_object = radeon_gem_object_free, |
.gem_free_object = radeon_gem_object_free, |
// .gem_open_object = radeon_gem_object_open, |
// .gem_close_object = radeon_gem_object_close, |
// .dumb_create = radeon_mode_dumb_create, |
/drivers/video/drm/radeon/radeon_display.c |
---|
30,26 → 30,13 |
#include "atom.h" |
#include <asm/div64.h> |
#include <linux/pm_runtime.h> |
#include <drm/drm_crtc_helper.h> |
#include <drm/drm_plane_helper.h> |
#include <drm/drm_edid.h> |
/* Greatest common divisor */ |
unsigned long gcd(unsigned long a, unsigned long b) |
{ |
unsigned long r; |
#include <linux/gcd.h> |
if (a < b) |
swap(a, b); |
if (!b) |
return a; |
while ((r = a % b) != 0) { |
a = b; |
b = r; |
} |
return b; |
} |
static void avivo_crtc_load_lut(struct drm_crtc *crtc) |
{ |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
167,7 → 154,7 |
(NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | |
NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); |
WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset, |
(NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) | |
(NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) | |
NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); |
/* XXX match this to the depth of the crtc fmt block, move to modeset? */ |
WREG32(0x6940 + radeon_crtc->crtc_offset, 0); |
267,6 → 254,65 |
kfree(radeon_crtc); |
} |
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) |
{ |
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
unsigned long flags; |
u32 update_pending; |
int vpos, hpos; |
/* can happen during initialization */ |
if (radeon_crtc == NULL) |
return; |
/* Skip the pageflip completion check below (based on polling) on |
* asics which reliably support hw pageflip completion irqs. pflip |
* irqs are a reliable and race-free method of handling pageflip |
* completion detection. A use_pflipirq module parameter < 2 allows |
* to override this in case of asics with faulty pflip irqs. |
* A module parameter of 0 would only use this polling based path, |
* a parameter of 1 would use pflip irq only as a backup to this |
* path, as in Linux 3.16. |
*/ |
if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev)) |
return; |
spin_lock_irqsave(&rdev->ddev->event_lock, flags); |
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) { |
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != " |
"RADEON_FLIP_SUBMITTED(%d)\n", |
radeon_crtc->flip_status, |
RADEON_FLIP_SUBMITTED); |
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); |
return; |
} |
update_pending = radeon_page_flip_pending(rdev, crtc_id); |
/* Has the pageflip already completed in crtc, or is it certain |
* to complete in this vblank? |
*/ |
if (update_pending && |
(DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, |
crtc_id, |
USE_REAL_VBLANKSTART, |
&vpos, &hpos, NULL, NULL, |
&rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && |
((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || |
(vpos < 0 && !ASIC_IS_AVIVO(rdev)))) { |
/* crtc didn't flip in this target vblank interval, |
* but flip is pending in crtc. Based on the current |
* scanout position we know that the current frame is |
* (nearly) complete and the flip will (likely) |
* complete before the start of the next frame. |
*/ |
update_pending = 0; |
} |
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); |
// if (!update_pending) |
// radeon_crtc_handle_flip(rdev, crtc_id); |
} |
static int |
radeon_crtc_set_config(struct drm_mode_set *set) |
{ |
632,6 → 678,9 |
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && |
pll->flags & RADEON_PLL_USE_REF_DIV) |
ref_div_max = pll->reference_div; |
else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) |
/* fix for problems on RS880 */ |
ref_div_max = min(pll->max_ref_div, 7u); |
else |
ref_div_max = pll->max_ref_div; |
1016,6 → 1065,13 |
{ RADEON_FMT_DITHER_ENABLE, "on" }, |
}; |
static struct drm_prop_enum_list radeon_output_csc_enum_list[] = |
{ { RADEON_OUTPUT_CSC_BYPASS, "bypass" }, |
{ RADEON_OUTPUT_CSC_TVRGB, "tvrgb" }, |
{ RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" }, |
{ RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" }, |
}; |
static int radeon_modeset_create_props(struct radeon_device *rdev) |
{ |
int sz; |
1078,6 → 1134,12 |
"dither", |
radeon_dither_enum_list, sz); |
sz = ARRAY_SIZE(radeon_output_csc_enum_list); |
rdev->mode_info.output_csc_property = |
drm_property_create_enum(rdev->ddev, 0, |
"output_csc", |
radeon_output_csc_enum_list, sz); |
return 0; |
} |
1408,8 → 1470,10 |
* unknown small number of scanlines wrt. real scanout position. |
* |
*/ |
int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, |
int *vpos, int *hpos, void *stime, void *etime) |
int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, |
unsigned int flags, int *vpos, int *hpos, |
ktime_t *stime, ktime_t *etime, |
const struct drm_display_mode *mode) |
{ |
u32 stat_crtc = 0, vbl = 0, position = 0; |
int vbl_start, vbl_end, vtotal, ret = 0; |
1417,8 → 1481,14 |
struct radeon_device *rdev = dev->dev_private; |
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ |
/* Get optional system timestamp before query. */ |
if (stime) |
*stime = ktime_get(); |
if (ASIC_IS_DCE4(rdev)) { |
if (crtc == 0) { |
if (pipe == 0) { |
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
EVERGREEN_CRTC0_REGISTER_OFFSET); |
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1425,7 → 1495,7 |
EVERGREEN_CRTC0_REGISTER_OFFSET); |
ret |= DRM_SCANOUTPOS_VALID; |
} |
if (crtc == 1) { |
if (pipe == 1) { |
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
EVERGREEN_CRTC1_REGISTER_OFFSET); |
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1432,7 → 1502,7 |
EVERGREEN_CRTC1_REGISTER_OFFSET); |
ret |= DRM_SCANOUTPOS_VALID; |
} |
if (crtc == 2) { |
if (pipe == 2) { |
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
EVERGREEN_CRTC2_REGISTER_OFFSET); |
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1439,7 → 1509,7 |
EVERGREEN_CRTC2_REGISTER_OFFSET); |
ret |= DRM_SCANOUTPOS_VALID; |
} |
if (crtc == 3) { |
if (pipe == 3) { |
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
EVERGREEN_CRTC3_REGISTER_OFFSET); |
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1446,7 → 1516,7 |
EVERGREEN_CRTC3_REGISTER_OFFSET); |
ret |= DRM_SCANOUTPOS_VALID; |
} |
if (crtc == 4) { |
if (pipe == 4) { |
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
EVERGREEN_CRTC4_REGISTER_OFFSET); |
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1453,7 → 1523,7 |
EVERGREEN_CRTC4_REGISTER_OFFSET); |
ret |= DRM_SCANOUTPOS_VALID; |
} |
if (crtc == 5) { |
if (pipe == 5) { |
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + |
EVERGREEN_CRTC5_REGISTER_OFFSET); |
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + |
1461,12 → 1531,12 |
ret |= DRM_SCANOUTPOS_VALID; |
} |
} else if (ASIC_IS_AVIVO(rdev)) { |
if (crtc == 0) { |
if (pipe == 0) { |
vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); |
position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); |
ret |= DRM_SCANOUTPOS_VALID; |
} |
if (crtc == 1) { |
if (pipe == 1) { |
vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); |
position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); |
ret |= DRM_SCANOUTPOS_VALID; |
1473,7 → 1543,7 |
} |
} else { |
/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ |
if (crtc == 0) { |
if (pipe == 0) { |
/* Assume vbl_end == 0, get vbl_start from |
* upper 16 bits. |
*/ |
1487,7 → 1557,7 |
ret |= DRM_SCANOUTPOS_VALID; |
} |
if (crtc == 1) { |
if (pipe == 1) { |
vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & |
RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT; |
position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
1499,6 → 1569,12 |
} |
} |
/* Get optional system timestamp after query. */ |
if (etime) |
*etime = ktime_get(); |
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ |
/* Decode into vertical and horizontal scanout position. */ |
*vpos = position & 0x1fff; |
*hpos = (position >> 16) & 0x1fff; |
1512,7 → 1588,7 |
} |
else { |
/* No: Fake something reasonable which gives at least ok results. */ |
vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; |
vbl_start = mode->crtc_vdisplay; |
vbl_end = 0; |
} |
1528,7 → 1604,7 |
/* Inside "upper part" of vblank area? Apply corrective offset if so: */ |
if (in_vbl && (*vpos >= vbl_start)) { |
vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; |
vtotal = mode->crtc_vtotal; |
*vpos = *vpos - vtotal; |
} |
1550,8 → 1626,8 |
* We only do this if DRM_CALLED_FROM_VBLIRQ. |
*/ |
if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { |
vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; |
vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; |
vbl_start = mode->crtc_vdisplay; |
vtotal = mode->crtc_vtotal; |
if (vbl_start - *vpos < vtotal / 100) { |
*vpos -= vtotal; |
/drivers/video/drm/radeon/radeon_encoders.c |
---|
179,9 → 179,12 |
(rdev->pdev->subsystem_vendor == 0x1734) && |
(rdev->pdev->subsystem_device == 0x1107)) |
use_bl = false; |
/* Older PPC macs use on-GPU backlight controller */ |
#ifndef CONFIG_PPC_PMAC |
/* disable native backlight control on older asics */ |
else if (rdev->family < CHIP_R600) |
use_bl = false; |
#endif |
else |
use_bl = true; |
} |
191,7 → 194,6 |
radeon_atom_backlight_init(radeon_encoder, connector); |
else |
radeon_legacy_backlight_init(radeon_encoder, connector); |
rdev->mode_info.bl_encoder = radeon_encoder; |
} |
} |
244,8 → 246,17 |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
radeon_connector = to_radeon_connector(connector); |
if (radeon_encoder->active_device & radeon_connector->devices) |
if (radeon_encoder->is_mst_encoder) { |
struct radeon_encoder_mst *mst_enc; |
if (!radeon_connector->is_mst_connector) |
continue; |
mst_enc = radeon_encoder->enc_priv; |
if (mst_enc->connector == radeon_connector->mst_port) |
return connector; |
} else if (radeon_encoder->active_device & radeon_connector->devices) |
return connector; |
} |
return NULL; |
} |
390,6 → 401,9 |
case DRM_MODE_CONNECTOR_DVID: |
case DRM_MODE_CONNECTOR_HDMIA: |
case DRM_MODE_CONNECTOR_DisplayPort: |
if (radeon_connector->is_mst_connector) |
return false; |
dig_connector = radeon_connector->con_priv; |
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || |
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) |
/drivers/video/drm/radeon/radeon_fb.c |
---|
192,7 → 192,6 |
struct drm_mode_fb_cmd2 mode_cmd; |
struct drm_gem_object *gobj = NULL; |
struct radeon_bo *rbo = NULL; |
struct device *device = &rdev->pdev->dev; |
int ret; |
unsigned long tmp; |
215,18 → 214,19 |
rbo = gem_to_radeon_bo(gobj); |
/* okay we have an object now allocate the framebuffer */ |
info = framebuffer_alloc(0, device); |
if (info == NULL) { |
ret = -ENOMEM; |
info = drm_fb_helper_alloc_fbi(helper); |
if (IS_ERR(info)) { |
ret = PTR_ERR(info); |
goto out_unref; |
} |
info->par = rfbdev; |
info->skip_vt_switch = true; |
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); |
if (ret) { |
DRM_ERROR("failed to initialize framebuffer %d\n", ret); |
goto out_unref; |
goto out_destroy_fbi; |
} |
fb = &rfbdev->rfb.base; |
233,7 → 233,6 |
/* setup helper */ |
rfbdev->helper.fb = fb; |
rfbdev->helper.fbdev = info; |
// memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); |
253,11 → 252,6 |
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); |
/* setup aperture base/size for vesafb takeover */ |
info->apertures = alloc_apertures(1); |
if (!info->apertures) { |
ret = -ENOMEM; |
goto out_unref; |
} |
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; |
info->apertures->ranges[0].size = rdev->mc.aper_size; |
275,6 → 269,8 |
return 0; |
out_destroy_fbi: |
// drm_fb_helper_release_fbi(helper); |
out_unref: |
if (rbo) { |
288,16 → 284,11 |
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) |
{ |
struct fb_info *info; |
struct radeon_framebuffer *rfb = &rfbdev->rfb; |
if (rfbdev->helper.fbdev) { |
info = rfbdev->helper.fbdev; |
// drm_fb_helper_unregister_fbi(&rfbdev->helper); |
// drm_fb_helper_release_fbi(&rfbdev->helper); |
// unregister_framebuffer(info); |
// framebuffer_release(info); |
} |
if (rfb->obj) { |
rfb->obj = NULL; |
} |
318,7 → 309,6 |
struct radeon_fbdev *rfbdev; |
int bpp_sel = 32; |
int ret; |
ENTER(); |
/* select 8 bpp console on RN50 or 16MB cards */ |
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) |
337,20 → 327,27 |
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, |
rdev->num_crtc, |
RADEONFB_CONN_LIMIT); |
if (ret) { |
kfree(rfbdev); |
return ret; |
} |
if (ret) |
goto free; |
drm_fb_helper_single_add_all_connectors(&rfbdev->helper); |
ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper); |
if (ret) |
goto fini; |
/* disable all the possible outputs/crtcs before entering KMS mode */ |
drm_helper_disable_unused_functions(rdev->ddev); |
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); |
LEAVE(); |
ret = drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); |
if (ret) |
goto fini; |
return 0; |
fini: |
// drm_fb_helper_fini(&rfbdev->helper); |
free: |
kfree(rfbdev); |
return ret; |
} |
void radeon_fbdev_fini(struct radeon_device *rdev) |
363,17 → 360,6 |
rdev->mode_info.rfbdev = NULL; |
} |
int radeon_fbdev_total_size(struct radeon_device *rdev) |
{ |
struct radeon_bo *robj; |
int size = 0; |
robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); |
size += radeon_bo_size(robj); |
return size; |
} |
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) |
{ |
if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) |
380,3 → 366,13 |
return true; |
return false; |
} |
void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector) |
{ |
drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector); |
} |
void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector) |
{ |
drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector); |
} |
/drivers/video/drm/radeon/radeon_fence.c |
---|
33,6 → 33,7 |
#include <linux/wait.h> |
#include <linux/kref.h> |
#include <linux/slab.h> |
#include <linux/firmware.h> |
#include <drm/drmP.h> |
#include "radeon_reg.h" |
#include "radeon.h" |
341,10 → 342,9 |
return true; |
} |
// if (down_read_trylock(&rdev->exclusive_lock)) |
{ |
if (down_read_trylock(&rdev->exclusive_lock)) { |
radeon_fence_process(rdev, ring); |
// up_read(&rdev->exclusive_lock); |
up_read(&rdev->exclusive_lock); |
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) { |
return true; |
/drivers/video/drm/radeon/radeon_gart.c |
---|
30,8 → 30,7 |
#include "radeon.h" |
static inline void * |
pci_alloc_consistent(struct pci_dev *hwdev, size_t size, |
void* pci_alloc_consistent(struct pci_dev *hwdev, size_t size, |
addr_t *dma_handle) |
{ |
174,6 → 173,19 |
radeon_bo_unpin(rdev->gart.robj); |
radeon_bo_unreserve(rdev->gart.robj); |
rdev->gart.table_addr = gpu_addr; |
if (!r) { |
int i; |
/* We might have dropped some GART table updates while it wasn't |
* mapped, restore all entries |
*/ |
for (i = 0; i < rdev->gart.num_gpu_pages; i++) |
radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]); |
mb(); |
radeon_gart_tlb_flush(rdev); |
} |
return r; |
} |
237,7 → 249,6 |
unsigned t; |
unsigned p; |
int i, j; |
u64 page_base; |
if (!rdev->gart.ready) { |
WARN(1, "trying to unbind memory from uninitialized GART !\n"); |
248,20 → 259,20 |
for (i = 0; i < pages; i++, p++) { |
if (rdev->gart.pages[p]) { |
rdev->gart.pages[p] = NULL; |
rdev->gart.pages_addr[p] = rdev->dummy_page.addr; |
page_base = rdev->gart.pages_addr[p]; |
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
rdev->gart.pages_entry[t] = rdev->dummy_page.entry; |
if (rdev->gart.ptr) { |
radeon_gart_set_page(rdev, t, page_base, |
RADEON_GART_PAGE_DUMMY); |
radeon_gart_set_page(rdev, t, |
rdev->dummy_page.entry); |
} |
page_base += RADEON_GPU_PAGE_SIZE; |
} |
} |
} |
if (rdev->gart.ptr) { |
mb(); |
radeon_gart_tlb_flush(rdev); |
} |
} |
/** |
* radeon_gart_bind - bind pages into the gart page table |
283,7 → 294,7 |
{ |
unsigned t; |
unsigned p; |
uint64_t page_base; |
uint64_t page_base, page_entry; |
int i, j; |
if (!rdev->gart.ready) { |
294,18 → 305,21 |
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
for (i = 0; i < pages; i++, p++) { |
rdev->gart.pages_addr[p] = dma_addr[i]; |
rdev->gart.pages[p] = pagelist[i]; |
page_base = dma_addr[i]; |
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
page_entry = radeon_gart_get_page_entry(page_base, flags); |
rdev->gart.pages_entry[t] = page_entry; |
if (rdev->gart.ptr) { |
page_base = rdev->gart.pages_addr[p]; |
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
radeon_gart_set_page(rdev, t, page_base, flags); |
radeon_gart_set_page(rdev, t, page_entry); |
} |
page_base += RADEON_GPU_PAGE_SIZE; |
} |
} |
} |
if (rdev->gart.ptr) { |
mb(); |
radeon_gart_tlb_flush(rdev); |
} |
return 0; |
} |
343,16 → 357,15 |
radeon_gart_fini(rdev); |
return -ENOMEM; |
} |
rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) * |
rdev->gart.num_cpu_pages); |
if (rdev->gart.pages_addr == NULL) { |
rdev->gart.pages_entry = KernelAlloc(sizeof(uint64_t) * |
rdev->gart.num_gpu_pages); |
if (rdev->gart.pages_entry == NULL) { |
radeon_gart_fini(rdev); |
return -ENOMEM; |
} |
/* set GART entry to point to the dummy page by default */ |
for (i = 0; i < rdev->gart.num_cpu_pages; i++) { |
rdev->gart.pages_addr[i] = rdev->dummy_page.addr; |
} |
for (i = 0; i < rdev->gart.num_gpu_pages; i++) |
rdev->gart.pages_entry[i] = rdev->dummy_page.entry; |
return 0; |
} |
365,15 → 378,15 |
*/ |
void radeon_gart_fini(struct radeon_device *rdev) |
{ |
if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { |
if (rdev->gart.ready) { |
/* unbind pages */ |
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); |
} |
rdev->gart.ready = false; |
vfree(rdev->gart.pages); |
vfree(rdev->gart.pages_addr); |
vfree(rdev->gart.pages_entry); |
rdev->gart.pages = NULL; |
rdev->gart.pages_addr = NULL; |
rdev->gart.pages_entry = NULL; |
radeon_dummy_page_fini(rdev); |
} |
/drivers/video/drm/radeon/radeon_i2c.c |
---|
924,6 → 924,7 |
i2c->rec = *rec; |
i2c->adapter.owner = THIS_MODULE; |
i2c->adapter.class = I2C_CLASS_DDC; |
i2c->adapter.dev.parent = &dev->pdev->dev; |
i2c->dev = dev; |
i2c_set_adapdata(&i2c->adapter, i2c); |
mutex_init(&i2c->mutex); |
1047,11 → 1048,6 |
return NULL; |
} |
struct drm_encoder *radeon_best_encoder(struct drm_connector *connector) |
{ |
return NULL; |
} |
void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, |
u8 slave_addr, |
u8 addr, |
/drivers/video/drm/radeon/radeon_irq_kms.c |
---|
32,12 → 32,10 |
#include "radeon.h" |
#include "atom.h" |
#include <linux/pm_runtime.h> |
#define RADEON_WAIT_IDLE_TIMEOUT 200 |
extern int irq_override; |
/** |
* radeon_driver_irq_handler_kms - irq handler for KMS |
* |
97,7 → 95,13 |
*/ |
int radeon_driver_irq_postinstall_kms(struct drm_device *dev) |
{ |
struct radeon_device *rdev = dev->dev_private; |
if (ASIC_IS_AVIVO(rdev)) |
dev->max_vblank_count = 0x00ffffff; |
else |
dev->max_vblank_count = 0x001fffff; |
return 0; |
} |
148,6 → 152,10 |
int r = 0; |
spin_lock_init(&rdev->irq.lock); |
r = drm_vblank_init(rdev->ddev, rdev->num_crtc); |
if (r) { |
return r; |
} |
/* enable msi */ |
rdev->msi_enabled = 0; |
172,7 → 180,7 |
*/ |
void radeon_irq_kms_fini(struct radeon_device *rdev) |
{ |
// drm_vblank_cleanup(rdev->ddev); |
drm_vblank_cleanup(rdev->ddev); |
if (rdev->irq.installed) { |
// drm_irq_uninstall(rdev->ddev); |
rdev->irq.installed = false; |
/drivers/video/drm/radeon/radeon_kfd.h |
---|
29,7 → 29,7 |
#define RADEON_KFD_H_INCLUDED |
#include <linux/types.h> |
//#include "../amd/include/kgd_kfd_interface.h" |
#include "kgd_kfd_interface.h" |
struct radeon_device; |
/drivers/video/drm/radeon/radeon_legacy_crtc.c |
---|
1054,6 → 1054,7 |
DRM_ERROR("Mode need scaling but only first crtc can do that.\n"); |
} |
} |
radeon_cursor_reset(crtc); |
return 0; |
} |
/drivers/video/drm/radeon/radeon_legacy_encoders.c |
---|
36,7 → 36,7 |
static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) |
{ |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct drm_encoder_helper_funcs *encoder_funcs; |
const struct drm_encoder_helper_funcs *encoder_funcs; |
encoder_funcs = encoder->helper_private; |
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); |
441,6 → 441,7 |
backlight_update_status(bd); |
DRM_INFO("radeon legacy LVDS backlight initialized\n"); |
rdev->mode_info.bl_encoder = radeon_encoder; |
return; |
1495,6 → 1496,9 |
if (found) |
break; |
if (!drm_can_sleep()) |
mdelay(1); |
else |
msleep(1); |
} |
/drivers/video/drm/radeon/radeon_mode.h |
---|
33,6 → 33,7 |
#include <drm/drm_crtc.h> |
#include <drm/drm_edid.h> |
#include <drm/drm_dp_helper.h> |
#include <drm/drm_dp_mst_helper.h> |
#include <drm/drm_fixed.h> |
#include <drm/drm_crtc_helper.h> |
#include <linux/i2c.h> |
85,6 → 86,13 |
RADEON_HPD_NONE = 0xff, |
}; |
enum radeon_output_csc { |
RADEON_OUTPUT_CSC_BYPASS = 0, |
RADEON_OUTPUT_CSC_TVRGB = 1, |
RADEON_OUTPUT_CSC_YCBCR601 = 2, |
RADEON_OUTPUT_CSC_YCBCR709 = 3, |
}; |
#define RADEON_MAX_I2C_BUS 16 |
/* radeon gpio-based i2c |
229,7 → 237,6 |
int offset; |
bool last_buffer_filled_status; |
int id; |
struct r600_audio_pin *pin; |
}; |
struct radeon_mode_info { |
255,6 → 262,8 |
struct drm_property *audio_property; |
/* FMT dithering */ |
struct drm_property *dither_property; |
/* Output CSC */ |
struct drm_property *output_csc_property; |
/* hardcoded DFP edid from BIOS */ |
struct edid *bios_hardcoded_edid; |
int bios_hardcoded_edid_size; |
265,6 → 274,9 |
u16 firmware_flags; |
/* pointer to backlight encoder */ |
struct radeon_encoder *bl_encoder; |
/* bitmask for active encoder frontends */ |
uint32_t active_encoders; |
}; |
#define RADEON_MAX_BL_LEVEL 0xFF |
330,7 → 342,6 |
int max_cursor_width; |
int max_cursor_height; |
uint32_t legacy_display_base_addr; |
uint32_t legacy_cursor_offset; |
enum radeon_rmx_type rmx_type; |
u8 h_border; |
u8 v_border; |
356,7 → 367,9 |
u32 line_time; |
u32 wm_low; |
u32 wm_high; |
u32 lb_vblank_lead_lines; |
struct drm_display_mode hw_mode; |
enum radeon_output_csc output_csc; |
}; |
struct radeon_encoder_primary_dac { |
426,6 → 439,8 |
uint8_t backlight_level; |
int panel_mode; |
struct radeon_afmt *afmt; |
struct r600_audio_pin *pin; |
int active_mst_links; |
}; |
struct radeon_encoder_atom_dac { |
432,6 → 447,17 |
enum radeon_tv_std tv_std; |
}; |
struct radeon_encoder_mst { |
int crtc; |
struct radeon_encoder *primary; |
struct radeon_connector *connector; |
struct drm_dp_mst_port *port; |
int pbn; |
int fe; |
bool fe_from_be; |
bool enc_active; |
}; |
struct radeon_encoder { |
struct drm_encoder base; |
uint32_t encoder_enum; |
449,6 → 475,12 |
int audio_polling_active; |
bool is_ext_encoder; |
u16 caps; |
struct radeon_audio_funcs *audio; |
enum radeon_output_csc output_csc; |
bool can_mst; |
uint32_t offset; |
bool is_mst_encoder; |
/* front end for this mst encoder */ |
}; |
struct radeon_connector_atom_dig { |
459,6 → 491,7 |
int dp_clock; |
int dp_lane_count; |
bool edp_on; |
bool is_mst; |
}; |
struct radeon_gpio_rec { |
502,6 → 535,11 |
RADEON_FMT_DITHER_ENABLE = 1, |
}; |
struct stream_attribs { |
uint16_t fe; |
uint16_t slots; |
}; |
struct radeon_connector { |
struct drm_connector base; |
uint32_t connector_id; |
516,6 → 554,7 |
void *con_priv; |
bool dac_load_detect; |
bool detected_by_load; /* if the connection status was determined by load */ |
bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */ |
uint16_t connector_object_id; |
struct radeon_hpd hpd; |
struct radeon_router router; |
523,6 → 562,14 |
enum radeon_connector_audio audio; |
enum radeon_connector_dither dither; |
int pixelclock_for_modeset; |
bool is_mst_connector; |
struct radeon_connector *mst_port; |
struct drm_dp_mst_port *port; |
struct drm_dp_mst_topology_mgr mst_mgr; |
struct radeon_encoder *mst_encoder; |
struct stream_attribs cur_stream_attribs[6]; |
int enabled_attribs; |
}; |
struct radeon_framebuffer { |
641,6 → 688,9 |
struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; |
}; |
/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */ |
#define USE_REAL_VBLANKSTART (1 << 30) |
#define GET_DISTANCE_TO_VBLANKSTART (1 << 31) |
extern void |
radeon_add_atom_connector(struct drm_device *dev, |
707,15 → 757,26 |
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); |
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder, |
struct drm_connector *connector); |
int radeon_dp_get_max_link_rate(struct drm_connector *connector, |
const u8 *dpcd); |
extern void radeon_dp_set_rx_power_state(struct drm_connector *connector, |
u8 power_state); |
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector); |
extern ssize_t |
radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg); |
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode); |
extern void atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override); |
extern void radeon_atom_encoder_init(struct radeon_device *rdev); |
extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev); |
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, |
int action, uint8_t lane_num, |
uint8_t lane_set); |
extern void atombios_dig_transmitter_setup2(struct drm_encoder *encoder, |
int action, uint8_t lane_num, |
uint8_t lane_set, int fe); |
extern void atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, |
int fe); |
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); |
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); |
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); |
745,8 → 806,6 |
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); |
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux); |
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); |
extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, |
struct radeon_atom_ss *ss, |
int id); |
820,10 → 879,10 |
int x, int y); |
extern void radeon_cursor_reset(struct drm_crtc *crtc); |
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, |
unsigned int flags, |
int *vpos, int *hpos, void *stime, |
void *etime); |
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, |
unsigned int flags, int *vpos, int *hpos, |
ktime_t *stime, ktime_t *etime, |
const struct drm_display_mode *mode); |
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); |
extern struct edid * |
925,13 → 984,29 |
int radeon_fbdev_init(struct radeon_device *rdev); |
void radeon_fbdev_fini(struct radeon_device *rdev); |
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); |
int radeon_fbdev_total_size(struct radeon_device *rdev); |
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); |
void radeon_fbdev_restore_mode(struct radeon_device *rdev); |
void radeon_fb_output_poll_changed(struct radeon_device *rdev); |
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id); |
void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector); |
void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector); |
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); |
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled); |
/* mst */ |
int radeon_dp_mst_init(struct radeon_connector *radeon_connector); |
int radeon_dp_mst_probe(struct radeon_connector *radeon_connector); |
int radeon_dp_mst_check_status(struct radeon_connector *radeon_connector); |
int radeon_mst_debugfs_init(struct radeon_device *rdev); |
void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode); |
void radeon_setup_mst_connector(struct drm_device *dev); |
int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx); |
void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx); |
#endif |
/drivers/video/drm/radeon/radeon_object.c |
---|
172,18 → 172,7 |
else |
rbo->placements[i].lpfn = 0; |
} |
/* |
* Use two-ended allocation depending on the buffer size to |
* improve fragmentation quality. |
* 512kb was measured as the most optimal number. |
*/ |
if (rbo->tbo.mem.size > 512 * 1024) { |
for (i = 0; i < c; i++) { |
rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; |
} |
} |
} |
int radeon_bo_create(struct radeon_device *rdev, |
unsigned long size, int byte_align, bool kernel, |
232,11 → 221,30 |
if (!(rdev->flags & RADEON_IS_PCIE)) |
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx |
* See https://bugs.freedesktop.org/show_bug.cgi?id=91268 |
*/ |
if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635) |
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
#ifdef CONFIG_X86_32 |
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit |
* See https://bugs.freedesktop.org/show_bug.cgi?id=84627 |
*/ |
bo->flags &= ~RADEON_GEM_GTT_WC; |
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) |
/* Don't try to enable write-combining when it can't work, or things |
* may be slow |
* See https://bugs.freedesktop.org/show_bug.cgi?id=88758 |
*/ |
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ |
thanks to write-combining |
if (bo->flags & RADEON_GEM_GTT_WC) |
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " |
"better performance thanks to write-combining\n"); |
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
#endif |
radeon_ttm_placement_from_domain(bo, domain); |
/drivers/video/drm/radeon/radeon_object.h |
---|
143,8 → 143,6 |
extern int radeon_bo_list_validate(struct radeon_device *rdev, |
struct ww_acquire_ctx *ticket, |
struct list_head *head, int ring); |
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
struct vm_area_struct *vma); |
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
u32 tiling_flags, u32 pitch); |
extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo, |
/drivers/video/drm/radeon/radeon_pm.c |
---|
24,8 → 24,8 |
#include "radeon.h" |
#include "avivod.h" |
#include "atom.h" |
#include "r600_dpm.h" |
#define RADEON_IDLE_LOOP_MS 100 |
#define RADEON_RECLOCK_DELAY_MS 200 |
#define RADEON_WAIT_VBLANK_TIMEOUT 200 |
155,9 → 155,9 |
{ |
if (rdev->pm.active_crtcs) { |
rdev->pm.vblank_sync = false; |
// wait_event_timeout( |
// rdev->irq.vblank_queue, rdev->pm.vblank_sync, |
// msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); |
wait_event_timeout( |
rdev->irq.vblank_queue, rdev->pm.vblank_sync, |
msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); |
} |
} |
250,7 → 250,6 |
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) |
return; |
mutex_lock(&rdev->ddev->struct_mutex); |
down_write(&rdev->pm.mclk_lock); |
mutex_lock(&rdev->ring_lock); |
265,7 → 264,6 |
/* needs a GPU reset dont reset here */ |
mutex_unlock(&rdev->ring_lock); |
up_write(&rdev->pm.mclk_lock); |
mutex_unlock(&rdev->ddev->struct_mutex); |
return; |
} |
} |
276,7 → 274,7 |
for (i = 0; i < rdev->num_crtc; i++) { |
if (rdev->pm.active_crtcs & (1 << i)) { |
rdev->pm.req_vblank |= (1 << i); |
// drm_vblank_get(rdev->ddev, i); |
drm_vblank_get(rdev->ddev, i); |
} |
} |
} |
287,7 → 285,7 |
for (i = 0; i < rdev->num_crtc; i++) { |
if (rdev->pm.req_vblank & (1 << i)) { |
rdev->pm.req_vblank &= ~(1 << i); |
// drm_vblank_put(rdev->ddev, i); |
drm_vblank_put(rdev->ddev, i); |
} |
} |
} |
301,7 → 299,6 |
mutex_unlock(&rdev->ring_lock); |
up_write(&rdev->pm.mclk_lock); |
mutex_unlock(&rdev->ddev->struct_mutex); |
} |
static void radeon_pm_print_states(struct radeon_device *rdev) |
663,12 → 660,8 |
radeon_pm_compute_clocks(rdev); |
} |
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, |
enum radeon_pm_state_type dpm_state) |
static bool radeon_dpm_single_display(struct radeon_device *rdev) |
{ |
int i; |
struct radeon_ps *ps; |
u32 ui_class; |
bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? |
true : false; |
678,6 → 671,23 |
single_display = false; |
} |
/* 120hz tends to be problematic even if they are under the |
* vblank limit. |
*/ |
if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) |
single_display = false; |
return single_display; |
} |
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, |
enum radeon_pm_state_type dpm_state) |
{ |
int i; |
struct radeon_ps *ps; |
u32 ui_class; |
bool single_display = radeon_dpm_single_display(rdev); |
/* certain older asics have a separare 3D performance state, |
* so try that first if the user selected performance |
*/ |
803,6 → 813,7 |
struct radeon_ps *ps; |
enum radeon_pm_state_type dpm_state; |
int ret; |
bool single_display = radeon_dpm_single_display(rdev); |
/* if dpm init failed */ |
if (!rdev->pm.dpm_enabled) |
827,6 → 838,9 |
/* vce just modifies an existing state so force a change */ |
if (ps->vce_active != rdev->pm.dpm.vce_active) |
goto force; |
/* user has made a display change (such as timing) */ |
if (rdev->pm.dpm.single_display != single_display) |
goto force; |
if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { |
/* for pre-BTC and APUs if the num crtcs changed but state is the same, |
* all we need to do is update the display configuration. |
871,7 → 885,6 |
radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); |
} |
mutex_lock(&rdev->ddev->struct_mutex); |
down_write(&rdev->pm.mclk_lock); |
mutex_lock(&rdev->ring_lock); |
889,6 → 902,7 |
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; |
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; |
rdev->pm.dpm.single_display = single_display; |
/* wait for the rings to drain */ |
for (i = 0; i < RADEON_NUM_RINGS; i++) { |
921,7 → 935,6 |
done: |
mutex_unlock(&rdev->ring_lock); |
up_write(&rdev->pm.mclk_lock); |
mutex_unlock(&rdev->ddev->struct_mutex); |
} |
void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) |
1218,8 → 1231,39 |
return ret; |
} |
struct radeon_dpm_quirk { |
u32 chip_vendor; |
u32 chip_device; |
u32 subsys_vendor; |
u32 subsys_device; |
}; |
/* cards with dpm stability problems */ |
static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = { |
/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */ |
{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 }, |
/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */ |
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 }, |
{ 0, 0, 0, 0 }, |
}; |
int radeon_pm_init(struct radeon_device *rdev) |
{ |
struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; |
bool disable_dpm = false; |
/* Apply dpm quirks */ |
while (p && p->chip_device != 0) { |
if (rdev->pdev->vendor == p->chip_vendor && |
rdev->pdev->device == p->chip_device && |
rdev->pdev->subsystem_vendor == p->subsys_vendor && |
rdev->pdev->subsystem_device == p->subsys_device) { |
disable_dpm = true; |
break; |
} |
++p; |
} |
/* enable dpm on rv6xx+ */ |
switch (rdev->family) { |
case CHIP_RV610: |
1275,6 → 1319,8 |
(!(rdev->flags & RADEON_IS_IGP)) && |
(!rdev->smc_fw)) |
rdev->pm.pm_method = PM_METHOD_PROFILE; |
else if (disable_dpm && (radeon_dpm == -1)) |
rdev->pm.pm_method = PM_METHOD_PROFILE; |
else if (radeon_dpm == 0) |
rdev->pm.pm_method = PM_METHOD_PROFILE; |
else |
1477,7 → 1523,11 |
*/ |
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { |
if (rdev->pm.active_crtcs & (1 << crtc)) { |
vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL); |
vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, |
crtc, |
USE_REAL_VBLANKSTART, |
&vpos, &hpos, NULL, NULL, |
&rdev->mode_info.crtcs[crtc]->base.hwmode); |
if ((vbl_status & DRM_SCANOUTPOS_VALID) && |
!(vbl_status & DRM_SCANOUTPOS_IN_VBLANK)) |
in_vbl = false; |
/drivers/video/drm/radeon/radeon_ring.c |
---|
314,7 → 314,7 |
} |
/* and then save the content of the ring */ |
*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); |
*data = drm_malloc_ab(size, sizeof(uint32_t)); |
if (!*data) { |
mutex_unlock(&rdev->ring_lock); |
return 0; |
495,7 → 495,7 |
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
seq_printf(m, "%u dwords in ring\n", count); |
if (!ring->ready) |
if (!ring->ring) |
return 0; |
/* print 8 dw before current rptr as often it's the last executed |
/drivers/video/drm/radeon/radeon_test.c |
---|
119,11 → 119,11 |
if (ring == R600_RING_TYPE_DMA_INDEX) |
fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
vram_obj->tbo.resv); |
else |
fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
vram_obj->tbo.resv); |
if (IS_ERR(fence)) { |
DRM_ERROR("Failed GTT->VRAM copy %d\n", i); |
r = PTR_ERR(fence); |
170,11 → 170,11 |
if (ring == R600_RING_TYPE_DMA_INDEX) |
fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
vram_obj->tbo.resv); |
else |
fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, |
size / RADEON_GPU_PAGE_SIZE, |
NULL); |
vram_obj->tbo.resv); |
if (IS_ERR(fence)) { |
DRM_ERROR("Failed VRAM->GTT copy %d\n", i); |
r = PTR_ERR(fence); |
/drivers/video/drm/radeon/radeon_ttm.c |
---|
132,7 → 132,7 |
man->available_caching = TTM_PL_MASK_CACHING; |
man->default_caching = TTM_PL_FLAG_CACHED; |
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; |
#if __OS_HAS_AGP |
#if IS_ENABLED(CONFIG_AGP) |
if (rdev->flags & RADEON_IS_AGP) { |
if (!rdev->ddev->agp) { |
DRM_ERROR("AGP is not enabled for memory type %u\n", |
447,7 → 447,7 |
/* system memory */ |
return 0; |
case TTM_PL_TT: |
#if __OS_HAS_AGP |
#if IS_ENABLED(CONFIG_AGP) |
if (rdev->flags & RADEON_IS_AGP) { |
/* RADEON_IS_AGP is set only if AGP is active */ |
mem->bus.offset = mem->start << PAGE_SHIFT; |
565,7 → 565,7 |
struct radeon_ttm_tt *gtt; |
rdev = radeon_get_rdev(bdev); |
#if __OS_HAS_AGP |
#if IS_ENABLED(CONFIG_AGP) |
if (rdev->flags & RADEON_IS_AGP) { |
return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, |
size, page_flags, dummy_read_page); |
611,7 → 611,7 |
} |
rdev = radeon_get_rdev(ttm->bdev); |
#if __OS_HAS_AGP |
#if IS_ENABLED(CONFIG_AGP) |
if (rdev->flags & RADEON_IS_AGP) { |
return ttm_agp_tt_populate(ttm); |
} |
648,7 → 648,7 |
return; |
rdev = radeon_get_rdev(ttm->bdev); |
#if __OS_HAS_AGP |
#if IS_ENABLED(CONFIG_AGP) |
if (rdev->flags & RADEON_IS_AGP) { |
ttm_agp_tt_unpopulate(ttm); |
return; |
/drivers/video/drm/radeon/radeon_uvd.c |
---|
40,6 → 40,9 |
#define UVD_IDLE_TIMEOUT_MS 1000 |
/* Firmware Names */ |
#define FIRMWARE_R600 "radeon/R600_uvd.bin" |
#define FIRMWARE_RS780 "radeon/RS780_uvd.bin" |
#define FIRMWARE_RV770 "radeon/RV770_uvd.bin" |
#define FIRMWARE_RV710 "radeon/RV710_uvd.bin" |
#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin" |
#define FIRMWARE_SUMO "radeon/SUMO_uvd.bin" |
66,6 → 69,23 |
// INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler); |
switch (rdev->family) { |
case CHIP_RV610: |
case CHIP_RV630: |
case CHIP_RV670: |
case CHIP_RV620: |
case CHIP_RV635: |
fw_name = FIRMWARE_R600; |
break; |
case CHIP_RS780: |
case CHIP_RS880: |
fw_name = FIRMWARE_RS780; |
break; |
case CHIP_RV770: |
fw_name = FIRMWARE_RV770; |
break; |
case CHIP_RV710: |
case CHIP_RV730: |
case CHIP_RV740: |
184,28 → 204,32 |
int radeon_uvd_suspend(struct radeon_device *rdev) |
{ |
unsigned size; |
void *ptr; |
int i; |
int i, r; |
if (rdev->uvd.vcpu_bo == NULL) |
return 0; |
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) |
if (atomic_read(&rdev->uvd.handles[i])) |
break; |
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
uint32_t handle = atomic_read(&rdev->uvd.handles[i]); |
if (handle != 0) { |
struct radeon_fence *fence; |
if (i == RADEON_MAX_UVD_HANDLES) |
return 0; |
radeon_uvd_note_usage(rdev); |
size = radeon_bo_size(rdev->uvd.vcpu_bo); |
size -= rdev->uvd_fw->size; |
r = radeon_uvd_get_destroy_msg(rdev, |
R600_RING_TYPE_UVD_INDEX, handle, &fence); |
if (r) { |
DRM_ERROR("Error destroying UVD (%d)!\n", r); |
continue; |
} |
ptr = rdev->uvd.cpu_addr; |
ptr += rdev->uvd_fw->size; |
radeon_fence_wait(fence, false); |
radeon_fence_unref(&fence); |
rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); |
memcpy(rdev->uvd.saved_bo, ptr, size); |
rdev->uvd.filp[i] = NULL; |
atomic_set(&rdev->uvd.handles[i], 0); |
} |
} |
return 0; |
} |
226,11 → 250,6 |
ptr = rdev->uvd.cpu_addr; |
ptr += rdev->uvd_fw->size; |
if (rdev->uvd.saved_bo != NULL) { |
memcpy(ptr, rdev->uvd.saved_bo, size); |
kfree(rdev->uvd.saved_bo); |
rdev->uvd.saved_bo = NULL; |
} else |
memset(ptr, 0, size); |
return 0; |
376,6 → 395,29 |
return 0; |
} |
static int radeon_uvd_validate_codec(struct radeon_cs_parser *p, |
unsigned stream_type) |
{ |
switch (stream_type) { |
case 0: /* H264 */ |
case 1: /* VC1 */ |
/* always supported */ |
return 0; |
case 3: /* MPEG2 */ |
case 4: /* MPEG4 */ |
/* only since UVD 3 */ |
if (p->rdev->family >= CHIP_PALM) |
return 0; |
/* fall through */ |
default: |
DRM_ERROR("UVD codec not supported by hardware %d!\n", |
stream_type); |
return -EINVAL; |
} |
} |
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, |
unsigned offset, unsigned buf_sizes[]) |
{ |
416,50 → 458,70 |
return -EINVAL; |
} |
if (msg_type == 1) { |
/* it's a decode msg, calc buffer sizes */ |
r = radeon_uvd_cs_msg_decode(msg, buf_sizes); |
/* calc image size (width * height) */ |
img_size = msg[6] * msg[7]; |
switch (msg_type) { |
case 0: |
/* it's a create msg, calc image size (width * height) */ |
img_size = msg[7] * msg[8]; |
r = radeon_uvd_validate_codec(p, msg[4]); |
radeon_bo_kunmap(bo); |
if (r) |
return r; |
} else if (msg_type == 2) { |
/* it's a destroy msg, free the handle */ |
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) |
atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); |
radeon_bo_kunmap(bo); |
return 0; |
} else { |
/* it's a create msg, calc image size (width * height) */ |
img_size = msg[7] * msg[8]; |
radeon_bo_kunmap(bo); |
if (msg_type != 0) { |
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); |
/* try to alloc a new handle */ |
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { |
DRM_ERROR("Handle 0x%x already in use!\n", handle); |
return -EINVAL; |
} |
/* it's a create msg, no special handling needed */ |
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { |
p->rdev->uvd.filp[i] = p->filp; |
p->rdev->uvd.img_size[i] = img_size; |
return 0; |
} |
} |
/* create or decode, validate the handle */ |
DRM_ERROR("No more free UVD handles!\n"); |
return -EINVAL; |
case 1: |
/* it's a decode msg, validate codec and calc buffer sizes */ |
r = radeon_uvd_validate_codec(p, msg[4]); |
if (!r) |
r = radeon_uvd_cs_msg_decode(msg, buf_sizes); |
radeon_bo_kunmap(bo); |
if (r) |
return r; |
/* validate the handle */ |
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
if (atomic_read(&p->rdev->uvd.handles[i]) == handle) |
if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { |
if (p->rdev->uvd.filp[i] != p->filp) { |
DRM_ERROR("UVD handle collision detected!\n"); |
return -EINVAL; |
} |
return 0; |
} |
} |
/* handle not found try to alloc a new one */ |
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { |
p->rdev->uvd.filp[i] = p->filp; |
p->rdev->uvd.img_size[i] = img_size; |
DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); |
return -ENOENT; |
case 2: |
/* it's a destroy msg, free the handle */ |
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) |
atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); |
radeon_bo_kunmap(bo); |
return 0; |
default: |
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); |
return -EINVAL; |
} |
} |
DRM_ERROR("No more free UVD handles!\n"); |
BUG(); |
return -EINVAL; |
} |
/drivers/video/drm/radeon/radeon_vce.c |
---|
38,8 → 38,10 |
#define VCE_IDLE_TIMEOUT_MS 1000 |
/* Firmware Names */ |
#define FIRMWARE_TAHITI "radeon/TAHITI_vce.bin" |
#define FIRMWARE_BONAIRE "radeon/BONAIRE_vce.bin" |
MODULE_FIRMWARE(FIRMWARE_TAHITI); |
MODULE_FIRMWARE(FIRMWARE_BONAIRE); |
static void radeon_vce_idle_work_handler(struct work_struct *work); |
63,6 → 65,14 |
INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler); |
switch (rdev->family) { |
case CHIP_TAHITI: |
case CHIP_PITCAIRN: |
case CHIP_VERDE: |
case CHIP_OLAND: |
case CHIP_ARUBA: |
fw_name = FIRMWARE_TAHITI; |
break; |
case CHIP_BONAIRE: |
case CHIP_KAVERI: |
case CHIP_KABINI: |
118,13 → 128,17 |
rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8); |
/* we can only work with this fw version for now */ |
if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) |
if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) && |
(rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) && |
(rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8)))) |
return -EINVAL; |
/* allocate firmware, stack and heap BO */ |
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) + |
RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE; |
if (rdev->family < CHIP_BONAIRE) |
size = vce_v1_0_bo_size(rdev); |
else |
size = vce_v2_0_bo_size(rdev); |
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, |
&rdev->vce.vcpu_bo); |
225,6 → 239,10 |
return r; |
} |
memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo)); |
if (rdev->family < CHIP_BONAIRE) |
r = vce_v1_0_load_fw(rdev, cpu_addr); |
else |
memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size); |
radeon_bo_kunmap(rdev->vce.vcpu_bo); |
231,7 → 249,7 |
radeon_bo_unreserve(rdev->vce.vcpu_bo); |
return 0; |
return r; |
} |
/** |
343,31 → 361,31 |
/* stitch together an VCE create msg */ |
ib.length_dw = 0; |
ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ |
ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ |
ib.ptr[ib.length_dw++] = handle; |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(handle); |
ib.ptr[ib.length_dw++] = 0x00000030; /* len */ |
ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ |
ib.ptr[ib.length_dw++] = 0x00000000; |
ib.ptr[ib.length_dw++] = 0x00000042; |
ib.ptr[ib.length_dw++] = 0x0000000a; |
ib.ptr[ib.length_dw++] = 0x00000001; |
ib.ptr[ib.length_dw++] = 0x00000080; |
ib.ptr[ib.length_dw++] = 0x00000060; |
ib.ptr[ib.length_dw++] = 0x00000100; |
ib.ptr[ib.length_dw++] = 0x00000100; |
ib.ptr[ib.length_dw++] = 0x0000000c; |
ib.ptr[ib.length_dw++] = 0x00000000; |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000); |
ib.ptr[ib.length_dw++] = 0x00000014; /* len */ |
ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ |
ib.ptr[ib.length_dw++] = upper_32_bits(dummy); |
ib.ptr[ib.length_dw++] = dummy; |
ib.ptr[ib.length_dw++] = 0x00000001; |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy)); |
ib.ptr[ib.length_dw++] = cpu_to_le32(dummy); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); |
for (i = ib.length_dw; i < ib_size_dw; ++i) |
ib.ptr[i] = 0x0; |
ib.ptr[i] = cpu_to_le32(0x0); |
r = radeon_ib_schedule(rdev, &ib, NULL, false); |
if (r) { |
410,21 → 428,21 |
/* stitch together an VCE destroy msg */ |
ib.length_dw = 0; |
ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ |
ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ |
ib.ptr[ib.length_dw++] = handle; |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(handle); |
ib.ptr[ib.length_dw++] = 0x00000014; /* len */ |
ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ |
ib.ptr[ib.length_dw++] = upper_32_bits(dummy); |
ib.ptr[ib.length_dw++] = dummy; |
ib.ptr[ib.length_dw++] = 0x00000001; |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy)); |
ib.ptr[ib.length_dw++] = cpu_to_le32(dummy); |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); |
ib.ptr[ib.length_dw++] = 0x00000008; /* len */ |
ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */ |
ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */ |
for (i = ib.length_dw; i < ib_size_dw; ++i) |
ib.ptr[i] = 0x0; |
ib.ptr[i] = cpu_to_le32(0x0); |
r = radeon_ib_schedule(rdev, &ib, NULL, false); |
if (r) { |
493,19 → 511,28 |
* |
* @p: parser context |
* @handle: handle to validate |
* @allocated: allocated a new handle? |
* |
* Validates the handle and return the found session index or -EINVAL |
* we we don't have another free session index. |
*/ |
int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) |
static int radeon_vce_validate_handle(struct radeon_cs_parser *p, |
uint32_t handle, bool *allocated) |
{ |
unsigned i; |
*allocated = false; |
/* validate the handle */ |
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { |
if (atomic_read(&p->rdev->vce.handles[i]) == handle) |
if (atomic_read(&p->rdev->vce.handles[i]) == handle) { |
if (p->rdev->vce.filp[i] != p->filp) { |
DRM_ERROR("VCE handle collision detected!\n"); |
return -EINVAL; |
} |
return i; |
} |
} |
/* handle not found try to alloc a new one */ |
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { |
512,6 → 539,7 |
if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { |
p->rdev->vce.filp[i] = p->filp; |
p->rdev->vce.img_size[i] = 0; |
*allocated = true; |
return i; |
} |
} |
529,10 → 557,10 |
int radeon_vce_cs_parse(struct radeon_cs_parser *p) |
{ |
int session_idx = -1; |
bool destroyed = false; |
bool destroyed = false, created = false, allocated = false; |
uint32_t tmp, handle = 0; |
uint32_t *size = &tmp; |
int i, r; |
int i, r = 0; |
while (p->idx < p->chunk_ib->length_dw) { |
uint32_t len = radeon_get_ib_value(p, p->idx); |
540,18 → 568,21 |
if ((len < 8) || (len & 3)) { |
DRM_ERROR("invalid VCE command length (%d)!\n", len); |
return -EINVAL; |
r = -EINVAL; |
goto out; |
} |
if (destroyed) { |
DRM_ERROR("No other command allowed after destroy!\n"); |
return -EINVAL; |
r = -EINVAL; |
goto out; |
} |
switch (cmd) { |
case 0x00000001: // session |
handle = radeon_get_ib_value(p, p->idx + 2); |
session_idx = radeon_vce_validate_handle(p, handle); |
session_idx = radeon_vce_validate_handle(p, handle, |
&allocated); |
if (session_idx < 0) |
return session_idx; |
size = &p->rdev->vce.img_size[session_idx]; |
561,6 → 592,13 |
break; |
case 0x01000001: // create |
created = true; |
if (!allocated) { |
DRM_ERROR("Handle already in use!\n"); |
r = -EINVAL; |
goto out; |
} |
*size = radeon_get_ib_value(p, p->idx + 8) * |
radeon_get_ib_value(p, p->idx + 10) * |
8 * 3 / 2; |
571,6 → 609,7 |
case 0x04000005: // rate control |
case 0x04000007: // motion estimation |
case 0x04000008: // rdo |
case 0x04000009: // vui |
break; |
case 0x03000001: // encode |
577,12 → 616,12 |
r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, |
*size); |
if (r) |
return r; |
goto out; |
r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, |
*size / 3); |
if (r) |
return r; |
goto out; |
break; |
case 0x02000001: // destroy |
593,7 → 632,7 |
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, |
*size * 2); |
if (r) |
return r; |
goto out; |
break; |
case 0x05000004: // video bitstream buffer |
601,7 → 640,7 |
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, |
tmp); |
if (r) |
return r; |
goto out; |
break; |
case 0x05000005: // feedback buffer |
608,29 → 647,40 |
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, |
4096); |
if (r) |
return r; |
goto out; |
break; |
default: |
DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); |
return -EINVAL; |
r = -EINVAL; |
goto out; |
} |
if (session_idx == -1) { |
DRM_ERROR("no session command at start of IB\n"); |
return -EINVAL; |
r = -EINVAL; |
goto out; |
} |
p->idx += len / 4; |
} |
if (destroyed) { |
/* IB contains a destroy msg, free the handle */ |
if (allocated && !created) { |
DRM_ERROR("New session without create command!\n"); |
r = -ENOENT; |
} |
out: |
if ((!r && destroyed) || (r && allocated)) { |
/* |
* IB contains a destroy msg or we have allocated an |
* handle and got an error, anyway free the handle |
*/ |
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) |
atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); |
} |
return 0; |
return r; |
} |
/** |
649,12 → 699,12 |
{ |
uint64_t addr = semaphore->gpu_addr; |
radeon_ring_write(ring, VCE_CMD_SEMAPHORE); |
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); |
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); |
radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); |
radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE)); |
radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF)); |
radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF)); |
radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0))); |
if (!emit_wait) |
radeon_ring_write(ring, VCE_CMD_END); |
radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); |
return true; |
} |
669,10 → 719,10 |
void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
radeon_ring_write(ring, VCE_CMD_IB); |
radeon_ring_write(ring, ib->gpu_addr); |
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); |
radeon_ring_write(ring, ib->length_dw); |
radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB)); |
radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr)); |
radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr))); |
radeon_ring_write(ring, cpu_to_le32(ib->length_dw)); |
} |
/** |
688,12 → 738,12 |
struct radeon_ring *ring = &rdev->ring[fence->ring]; |
uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; |
radeon_ring_write(ring, VCE_CMD_FENCE); |
radeon_ring_write(ring, addr); |
radeon_ring_write(ring, upper_32_bits(addr)); |
radeon_ring_write(ring, fence->seq); |
radeon_ring_write(ring, VCE_CMD_TRAP); |
radeon_ring_write(ring, VCE_CMD_END); |
radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE)); |
radeon_ring_write(ring, cpu_to_le32(addr)); |
radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr))); |
radeon_ring_write(ring, cpu_to_le32(fence->seq)); |
radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP)); |
radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); |
} |
/** |
715,7 → 765,7 |
ring->idx, r); |
return r; |
} |
radeon_ring_write(ring, VCE_CMD_END); |
radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); |
radeon_ring_unlock_commit(rdev, ring, false); |
for (i = 0; i < rdev->usec_timeout; i++) { |
/drivers/video/drm/radeon/radeon_vm.c |
---|
331,7 → 331,6 |
bo_va->it.start = 0; |
bo_va->it.last = 0; |
bo_va->flags = 0; |
bo_va->addr = 0; |
bo_va->ref_count = 1; |
INIT_LIST_HEAD(&bo_va->bo_list); |
INIT_LIST_HEAD(&bo_va->vm_status); |
458,7 → 457,8 |
/* make sure object fit at this offset */ |
eoffset = soffset + size; |
if (soffset >= eoffset) { |
return -EINVAL; |
r = -EINVAL; |
goto error_unreserve; |
} |
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; |
465,7 → 465,8 |
if (last_pfn > rdev->vm_manager.max_pfn) { |
dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", |
last_pfn, rdev->vm_manager.max_pfn); |
return -EINVAL; |
r = -EINVAL; |
goto error_unreserve; |
} |
} else { |
473,52 → 474,57 |
} |
mutex_lock(&vm->mutex); |
soffset /= RADEON_GPU_PAGE_SIZE; |
eoffset /= RADEON_GPU_PAGE_SIZE; |
if (soffset || eoffset) { |
struct interval_tree_node *it; |
it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); |
if (it && it != &bo_va->it) { |
struct radeon_bo_va *tmp; |
tmp = container_of(it, struct radeon_bo_va, it); |
/* bo and tmp overlap, invalid offset */ |
dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " |
"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, |
soffset, tmp->bo, tmp->it.start, tmp->it.last); |
mutex_unlock(&vm->mutex); |
r = -EINVAL; |
goto error_unreserve; |
} |
} |
if (bo_va->it.start || bo_va->it.last) { |
if (bo_va->addr) { |
/* add a clone of the bo_va to clear the old address */ |
struct radeon_bo_va *tmp; |
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); |
if (!tmp) { |
mutex_unlock(&vm->mutex); |
return -ENOMEM; |
r = -ENOMEM; |
goto error_unreserve; |
} |
tmp->it.start = bo_va->it.start; |
tmp->it.last = bo_va->it.last; |
tmp->vm = vm; |
tmp->addr = bo_va->addr; |
tmp->bo = radeon_bo_ref(bo_va->bo); |
spin_lock(&vm->status_lock); |
list_add(&tmp->vm_status, &vm->freed); |
spin_unlock(&vm->status_lock); |
} |
interval_tree_remove(&bo_va->it, &vm->va); |
spin_lock(&vm->status_lock); |
bo_va->it.start = 0; |
bo_va->it.last = 0; |
list_del_init(&bo_va->vm_status); |
list_add(&tmp->vm_status, &vm->freed); |
spin_unlock(&vm->status_lock); |
} |
soffset /= RADEON_GPU_PAGE_SIZE; |
eoffset /= RADEON_GPU_PAGE_SIZE; |
if (soffset || eoffset) { |
struct interval_tree_node *it; |
it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); |
if (it) { |
struct radeon_bo_va *tmp; |
tmp = container_of(it, struct radeon_bo_va, it); |
/* bo and tmp overlap, invalid offset */ |
dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " |
"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, |
soffset, tmp->bo, tmp->it.start, tmp->it.last); |
mutex_unlock(&vm->mutex); |
return -EINVAL; |
} |
spin_lock(&vm->status_lock); |
bo_va->it.start = soffset; |
bo_va->it.last = eoffset - 1; |
list_add(&bo_va->vm_status, &vm->cleared); |
spin_unlock(&vm->status_lock); |
interval_tree_insert(&bo_va->it, &vm->va); |
} |
bo_va->flags = flags; |
bo_va->addr = 0; |
soffset >>= radeon_vm_block_size; |
eoffset >>= radeon_vm_block_size; |
550,7 → 556,6 |
r = radeon_vm_clear_bo(rdev, pt); |
if (r) { |
radeon_bo_unref(&pt); |
radeon_bo_reserve(bo_va->bo, false); |
return r; |
} |
570,6 → 575,10 |
mutex_unlock(&vm->mutex); |
return 0; |
error_unreserve: |
radeon_bo_unreserve(bo_va->bo); |
return r; |
} |
/** |
587,11 → 596,9 |
uint64_t result; |
/* page table offset */ |
result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; |
result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT]; |
result &= ~RADEON_GPU_PAGE_MASK; |
/* in case cpu page size != gpu page size*/ |
result |= addr & (~PAGE_MASK); |
return result; |
} |
745,9 → 752,11 |
*/ |
/* NI is optimized for 256KB fragments, SI and newer for 64KB */ |
uint64_t frag_flags = rdev->family == CHIP_CAYMAN ? |
uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) || |
(rdev->family == CHIP_ARUBA)) ? |
R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB; |
uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80; |
uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) || |
(rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80; |
uint64_t frag_start = ALIGN(pe_start, frag_align); |
uint64_t frag_end = pe_end & ~(frag_align - 1); |
916,7 → 925,16 |
} |
spin_lock(&vm->status_lock); |
if (mem) { |
if (list_empty(&bo_va->vm_status)) { |
spin_unlock(&vm->status_lock); |
return 0; |
} |
list_del_init(&bo_va->vm_status); |
} else { |
list_del(&bo_va->vm_status); |
list_add(&bo_va->vm_status, &vm->cleared); |
} |
spin_unlock(&vm->status_lock); |
bo_va->flags &= ~RADEON_VM_PAGE_VALID; |
942,10 → 960,6 |
addr = 0; |
} |
if (addr == bo_va->addr) |
return 0; |
bo_va->addr = addr; |
trace_radeon_vm_bo_update(bo_va); |
nptes = bo_va->it.last - bo_va->it.start + 1; |
1033,7 → 1047,7 |
struct radeon_vm *vm) |
{ |
struct radeon_bo_va *bo_va; |
int r; |
int r = 0; |
spin_lock(&vm->status_lock); |
while (!list_empty(&vm->freed)) { |
1044,14 → 1058,15 |
r = radeon_vm_bo_update(rdev, bo_va, NULL); |
radeon_bo_unref(&bo_va->bo); |
radeon_fence_unref(&bo_va->last_pt_update); |
spin_lock(&vm->status_lock); |
list_del(&bo_va->vm_status); |
kfree(bo_va); |
if (r) |
return r; |
break; |
spin_lock(&vm->status_lock); |
} |
spin_unlock(&vm->status_lock); |
return 0; |
return r; |
} |
1107,11 → 1122,12 |
list_del(&bo_va->bo_list); |
mutex_lock(&vm->mutex); |
if (bo_va->it.start || bo_va->it.last) |
interval_tree_remove(&bo_va->it, &vm->va); |
spin_lock(&vm->status_lock); |
list_del(&bo_va->vm_status); |
if (bo_va->addr) { |
if (bo_va->it.start || bo_va->it.last) { |
bo_va->bo = radeon_bo_ref(bo_va->bo); |
list_add(&bo_va->vm_status, &vm->freed); |
} else { |
1138,14 → 1154,13 |
struct radeon_bo_va *bo_va; |
list_for_each_entry(bo_va, &bo->va, bo_list) { |
if (bo_va->addr) { |
spin_lock(&bo_va->vm->status_lock); |
list_del(&bo_va->vm_status); |
if (list_empty(&bo_va->vm_status) && |
(bo_va->it.start || bo_va->it.last)) |
list_add(&bo_va->vm_status, &bo_va->vm->invalidated); |
spin_unlock(&bo_va->vm->status_lock); |
} |
} |
} |
/** |
* radeon_vm_init - initialize a vm instance |
1173,6 → 1188,7 |
spin_lock_init(&vm->status_lock); |
INIT_LIST_HEAD(&vm->invalidated); |
INIT_LIST_HEAD(&vm->freed); |
INIT_LIST_HEAD(&vm->cleared); |
pd_size = radeon_vm_directory_size(rdev); |
pd_entries = radeon_vm_num_pdes(rdev); |
/drivers/video/drm/radeon/rdisplay_kms.c |
---|
107,9 → 107,9 |
WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); |
} |
else { |
radeon_crtc->legacy_cursor_offset = gpu_addr - rdev->mc.vram_start; |
radeon_crtc->legacy_display_base_addr = gpu_addr - rdev->mc.vram_start; |
/* offset is from DISP(2)_BASE_ADDRESS */ |
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset); |
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr); |
} |
return old; |
243,9 → 243,6 |
hdisplay = mode->hdisplay; |
vdisplay = mode->vdisplay; |
if (crtc->invert_dimensions) |
swap(hdisplay, vdisplay); |
fb = main_fb; |
fb->width = reqmode->width; |
/drivers/video/drm/radeon/rs400.c |
---|
212,11 → 212,9 |
#define RS400_PTE_WRITEABLE (1 << 2) |
#define RS400_PTE_READABLE (1 << 3) |
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags) |
uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags) |
{ |
uint32_t entry; |
u32 *gtt = rdev->gart.ptr; |
entry = (lower_32_bits(addr) & PAGE_MASK) | |
((upper_32_bits(addr) & 0xff) << 4); |
226,10 → 224,16 |
entry |= RS400_PTE_WRITEABLE; |
if (!(flags & RADEON_GART_PAGE_SNOOP)) |
entry |= RS400_PTE_UNSNOOPED; |
entry = cpu_to_le32(entry); |
gtt[i] = entry; |
return entry; |
} |
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t entry) |
{ |
u32 *gtt = rdev->gart.ptr; |
gtt[i] = cpu_to_le32(lower_32_bits(entry)); |
} |
int rs400_mc_wait_for_idle(struct radeon_device *rdev) |
{ |
unsigned i; |
454,6 → 458,21 |
/* rs400_fini - tear down all rs400 subsystems: power management, CP ring,
 * writeback, IB pool, GEM, GART, IRQs, fences, buffer objects and atombios
 * state, then drop the cached BIOS image.
 * NOTE(review): the call order here is deliberate (accel users are torn
 * down before the memory/IRQ infrastructure they depend on) - do not
 * reorder without checking each _fini's dependencies. */
void rs400_fini(struct radeon_device *rdev) |
{ |
radeon_pm_fini(rdev); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_gem_fini(rdev); |
rs400_gart_fini(rdev); |
radeon_irq_kms_fini(rdev); |
radeon_fence_driver_fini(rdev); |
radeon_bo_fini(rdev); |
radeon_atombios_fini(rdev); |
/* release the cached BIOS image and clear the stale pointer */
kfree(rdev->bios); |
rdev->bios = NULL; |
} |
int rs400_init(struct radeon_device *rdev) |
{ |
517,11 → 536,11 |
if (r) { |
/* Something went wrong with the accel init; stop accel */ |
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
// r100_cp_fini(rdev); |
// r100_wb_fini(rdev); |
// r100_ib_fini(rdev); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
rs400_gart_fini(rdev); |
// radeon_irq_kms_fini(rdev); |
radeon_irq_kms_fini(rdev); |
rdev->accel_working = false; |
} |
return 0; |
/drivers/video/drm/radeon/rs600.c |
---|
38,6 → 38,7 |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include "atom.h" |
#include "rs600d.h" |
586,11 → 587,8 |
radeon_gart_table_vram_free(rdev); |
} |
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags) |
uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags) |
{ |
void __iomem *ptr = (void *)rdev->gart.ptr; |
addr = addr & 0xFFFFFFFFFFFFF000ULL; |
addr |= R600_PTE_SYSTEM; |
if (flags & RADEON_GART_PAGE_VALID) |
601,9 → 599,16 |
addr |= R600_PTE_WRITEABLE; |
if (flags & RADEON_GART_PAGE_SNOOP) |
addr |= R600_PTE_SNOOPED; |
writeq(addr, ptr + (i * 8)); |
return addr; |
} |
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t entry) |
{ |
void __iomem *ptr = (void *)rdev->gart.ptr; |
writeq(entry, ptr + (i * 8)); |
} |
int rs600_irq_set(struct radeon_device *rdev) |
{ |
uint32_t tmp = 0; |
650,6 → 655,10 |
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); |
if (ASIC_IS_DCE2(rdev)) |
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0); |
/* posting read */ |
RREG32(R_000040_GEN_INT_CNTL); |
return 0; |
} |
734,21 → 743,21 |
/* Vertical blank interrupts */ |
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
if (rdev->irq.crtc_vblank_int[0]) { |
// drm_handle_vblank(rdev->ddev, 0); |
drm_handle_vblank(rdev->ddev, 0); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[0]) |
// radeon_crtc_handle_flip(rdev, 0); |
if (atomic_read(&rdev->irq.pflip[0])) |
radeon_crtc_handle_vblank(rdev, 0); |
} |
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
if (rdev->irq.crtc_vblank_int[1]) { |
// drm_handle_vblank(rdev->ddev, 1); |
drm_handle_vblank(rdev->ddev, 1); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[1]) |
// radeon_crtc_handle_flip(rdev, 1); |
if (atomic_read(&rdev->irq.pflip[1])) |
radeon_crtc_handle_vblank(rdev, 1); |
} |
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { |
queue_hotplug = true; |
973,7 → 982,7 |
return r; |
} |
r = r600_audio_init(rdev); |
r = radeon_audio_init(rdev); |
if (r) { |
dev_err(rdev->dev, "failed initializing audio\n"); |
return r; |
983,6 → 992,22 |
} |
/* rs600_fini - tear down all rs600 subsystems: power management, audio,
 * CP ring, writeback, IB pool, GEM, GART, IRQs, fences, buffer objects
 * and atombios state, then drop the cached BIOS image. */
void rs600_fini(struct radeon_device *rdev) |
{ |
radeon_pm_fini(rdev); |
radeon_audio_fini(rdev); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_gem_fini(rdev); |
rs600_gart_fini(rdev); |
radeon_irq_kms_fini(rdev); |
radeon_fence_driver_fini(rdev); |
radeon_bo_fini(rdev); |
radeon_atombios_fini(rdev); |
/* release the cached BIOS image and clear the stale pointer */
kfree(rdev->bios); |
rdev->bios = NULL; |
} |
int rs600_init(struct radeon_device *rdev) |
{ |
1046,11 → 1071,11 |
if (r) { |
/* Something went wrong with the accel init; stop accel */ |
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
// r100_cp_fini(rdev); |
// r100_wb_fini(rdev); |
// r100_ib_fini(rdev); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
rs600_gart_fini(rdev); |
// radeon_irq_kms_fini(rdev); |
radeon_irq_kms_fini(rdev); |
rdev->accel_working = false; |
} |
return 0; |
/drivers/video/drm/radeon/rs690.c |
---|
28,6 → 28,7 |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include "atom.h" |
#include "rs690d.h" |
206,6 → 207,9 |
{ |
u32 tmp; |
/* Guess line buffer size to be 8192 pixels */ |
u32 lb_size = 8192; |
/* |
* Line Buffer Setup |
* There is a single line buffer shared by both display controllers. |
242,6 → 246,13 |
tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
} |
WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); |
/* Save number of lines the linebuffer leads before the scanout */ |
if (mode1) |
rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); |
if (mode2) |
rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); |
} |
struct rs690_watermark { |
729,7 → 740,7 |
return r; |
} |
r = r600_audio_init(rdev); |
r = radeon_audio_init(rdev); |
if (r) { |
dev_err(rdev->dev, "failed initializing audio\n"); |
return r; |
740,6 → 751,22 |
/* rs690_fini - tear down all rs690 subsystems.  Mostly mirrors rs600_fini,
 * but rs690 shares the rs400-style GART, hence rs400_gart_fini(). */
void rs690_fini(struct radeon_device *rdev) |
{ |
radeon_pm_fini(rdev); |
radeon_audio_fini(rdev); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_gem_fini(rdev); |
rs400_gart_fini(rdev); |
radeon_irq_kms_fini(rdev); |
radeon_fence_driver_fini(rdev); |
radeon_bo_fini(rdev); |
radeon_atombios_fini(rdev); |
/* release the cached BIOS image and clear the stale pointer */
kfree(rdev->bios); |
rdev->bios = NULL; |
} |
int rs690_init(struct radeon_device *rdev) |
{ |
804,11 → 831,11 |
if (r) { |
/* Something went wrong with the accel init; stop accel */ |
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
// r100_cp_fini(rdev); |
// r100_wb_fini(rdev); |
// r100_ib_fini(rdev); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
rs400_gart_fini(rdev); |
// radeon_irq_kms_fini(rdev); |
radeon_irq_kms_fini(rdev); |
rdev->accel_working = false; |
} |
return 0; |
/drivers/video/drm/radeon/rs780_dpm.c |
---|
1001,6 → 1001,28 |
ps->sclk_high, ps->max_voltage); |
} |
/* get the current sclk in 10 khz units */ |
u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
/* feedback divider currently selected by the throttler */
u32 current_fb_div = RREG32(FVTHROT_STATUS_REG0) & CURRENT_FEEDBACK_DIV_MASK; |
u32 func_cntl = RREG32(CG_SPLL_FUNC_CNTL); |
/* NOTE(review): the +1 adjustments suggest the register fields encode
 * (divider - 1); the post divider is the sum of the HILEN and LOLEN
 * phases - confirm against the rs780 register spec. */
u32 ref_div = ((func_cntl & SPLL_REF_DIV_MASK) >> SPLL_REF_DIV_SHIFT) + 1; |
u32 post_div = ((func_cntl & SPLL_SW_HILEN_MASK) >> SPLL_SW_HILEN_SHIFT) + 1 + |
((func_cntl & SPLL_SW_LOLEN_MASK) >> SPLL_SW_LOLEN_SHIFT) + 1; |
/* sclk = reference_freq * fb_div / (post_div * ref_div) */
u32 sclk = (rdev->clock.spll.reference_freq * current_fb_div) / |
(post_div * ref_div); |
return sclk; |
} |
/* get the current mclk in 10 khz units */ |
u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
struct igp_power_info *pi = rs780_get_pi(rdev); |
/* the memory clock is not reprogrammed here; report the UMA clock
 * captured at boot */
return pi->bootup_uma_clk; |
} |
int rs780_dpm_force_performance_level(struct radeon_device *rdev, |
enum radeon_dpm_forced_level level) |
{ |
/drivers/video/drm/radeon/rv515.c |
---|
572,6 → 572,23 |
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm); |
} |
/* rv515_fini - tear down all rv515 subsystems: power management, CP ring,
 * writeback, IB pool, GEM, PCIe GART, AGP, IRQs, fences, buffer objects
 * and atombios state, then drop the cached BIOS image. */
void rv515_fini(struct radeon_device *rdev) |
{ |
radeon_pm_fini(rdev); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_gem_fini(rdev); |
rv370_pcie_gart_fini(rdev); |
radeon_agp_fini(rdev); |
radeon_irq_kms_fini(rdev); |
radeon_fence_driver_fini(rdev); |
radeon_bo_fini(rdev); |
radeon_atombios_fini(rdev); |
/* release the cached BIOS image and clear the stale pointer */
kfree(rdev->bios); |
rdev->bios = NULL; |
} |
int rv515_init(struct radeon_device *rdev) |
{ |
int r; |
639,6 → 656,12 |
if (r) { |
/* Something went wrong with the accel init; stop accel */ |
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
rv370_pcie_gart_fini(rdev); |
radeon_agp_fini(rdev); |
rdev->accel_working = false; |
} |
return 0; |
/drivers/video/drm/radeon/rv6xx_dpm.c |
---|
2050,6 → 2050,52 |
} |
} |
/* get the current sclk in 10 khz units */ |
u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
struct radeon_ps *rps = rdev->pm.dpm.current_ps; |
struct rv6xx_ps *ps = rv6xx_get_ps(rps); |
struct rv6xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> |
CURRENT_PROFILE_INDEX_SHIFT; |
if (current_index > 2) { |
return 0; |
} else { |
if (current_index == 0) |
pl = &ps->low; |
else if (current_index == 1) |
pl = &ps->medium; |
else /* current_index == 2 */ |
pl = &ps->high; |
return pl->sclk; |
} |
} |
/* get the current mclk in 10 khz units */ |
u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
struct radeon_ps *rps = rdev->pm.dpm.current_ps; |
struct rv6xx_ps *ps = rv6xx_get_ps(rps); |
struct rv6xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> |
CURRENT_PROFILE_INDEX_SHIFT; |
if (current_index > 2) { |
return 0; |
} else { |
if (current_index == 0) |
pl = &ps->low; |
else if (current_index == 1) |
pl = &ps->medium; |
else /* current_index == 2 */ |
pl = &ps->high; |
return pl->mclk; |
} |
} |
void rv6xx_dpm_fini(struct radeon_device *rdev) |
{ |
int i; |
/drivers/video/drm/radeon/rv730_dpm.c |
---|
464,7 → 464,7 |
result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); |
if (result != PPSMC_Result_OK) |
DRM_ERROR("Could not force DPM to low\n"); |
DRM_DEBUG("Could not force DPM to low\n"); |
WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); |
/drivers/video/drm/radeon/rv770.c |
---|
30,6 → 30,7 |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include <drm/radeon_drm.h> |
#include "rv770d.h" |
#include "atom.h" |
63,10 → 64,10 |
return 0; |
} |
// r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000, |
// 43663, 0x03FFFFFE, 1, 30, ~0, |
// &fb_div, &vclk_div, &dclk_div); |
// if (r) |
r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000, |
43663, 0x03FFFFFE, 1, 30, ~0, |
&fb_div, &vclk_div, &dclk_div); |
if (r) |
return r; |
fb_div |= 1; |
83,8 → 84,8 |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); |
WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1)); |
// r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
// if (r) |
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
if (r) |
return r; |
/* assert PLL_RESET */ |
114,8 → 115,8 |
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK); |
WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1)); |
// r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
// if (r) |
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
if (r) |
return r; |
/* switch VCLK and DCLK selection */ |
1120,6 → 1121,14 |
return 0; |
} |
/* r700_cp_fini - shut down the GFX command processor: stop the CP,
 * tear down the GFX ring and release its rptr_save_reg scratch register. */
void r700_cp_fini(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
r700_cp_stop(rdev); |
radeon_ring_fini(rdev, ring); |
radeon_scratch_free(rdev, ring->rptr_save_reg); |
} |
void rv770_set_clk_bypass_mode(struct radeon_device *rdev) |
{ |
u32 tmp, i; |
1714,16 → 1723,16 |
return r; |
} |
// r = rv770_uvd_resume(rdev); |
// if (!r) { |
// r = radeon_fence_driver_start_ring(rdev, |
// R600_RING_TYPE_UVD_INDEX); |
// if (r) |
// dev_err(rdev->dev, "UVD fences init error (%d).\n", r); |
// } |
r = uvd_v2_2_resume(rdev); |
if (!r) { |
r = radeon_fence_driver_start_ring(rdev, |
R600_RING_TYPE_UVD_INDEX); |
if (r) |
dev_err(rdev->dev, "UVD fences init error (%d).\n", r); |
} |
// if (r) |
// rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
if (r) |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
/* Enable IRQ */ |
if (!rdev->irq.installed) { |
1735,7 → 1744,7 |
r = r600_irq_init(rdev); |
if (r) { |
DRM_ERROR("radeon: IH init failed (%d).\n", r); |
// radeon_irq_kms_fini(rdev); |
radeon_irq_kms_fini(rdev); |
return r; |
} |
r600_irq_set(rdev); |
1763,18 → 1772,16 |
if (r) |
return r; |
// ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
// if (ring->ring_size) { |
// r = radeon_ring_init(rdev, ring, ring->ring_size, |
// R600_WB_UVD_RPTR_OFFSET, |
// UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, |
// 0, 0xfffff, RADEON_CP_PACKET2); |
// if (!r) |
// r = r600_uvd_init(rdev); |
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
if (ring->ring_size) { |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
RADEON_CP_PACKET2); |
if (!r) |
r = uvd_v1_0_init(rdev); |
// if (r) |
// DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); |
// } |
if (r) |
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); |
} |
r = radeon_ib_pool_init(rdev); |
if (r) { |
1782,21 → 1789,41 |
return r; |
} |
r = r600_audio_init(rdev); |
if (r) { |
DRM_ERROR("radeon: audio init failed\n"); |
return r; |
} |
return 0; |
} |
/* rv770_resume - bring the ASIC back after suspend: re-post the card via
 * atombios, restore golden registers, resume DPM power management and
 * restart the accel engines.  Returns 0 on success or the rv770_startup()
 * error code. */
int rv770_resume(struct radeon_device *rdev) |
{ |
int r; |
/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, |
* posting will perform necessary task to bring back GPU into good |
* shape. |
*/ |
/* post card */ |
atom_asic_init(rdev->mode_info.atom_context); |
/* init golden registers */ |
rv770_init_golden_registers(rdev); |
if (rdev->pm.pm_method == PM_METHOD_DPM) |
radeon_pm_resume(rdev); |
/* optimistically mark accel usable; cleared again below if startup fails */
rdev->accel_working = true; |
r = rv770_startup(rdev); |
if (r) { |
DRM_ERROR("r600 startup failed on resume\n"); |
rdev->accel_working = false; |
return r; |
} |
return r; |
} |
/* Plan is to move initialization in that function and use |
* helper function so that radeon_device_init pretty much |
* do nothing more than calling asic specific function. This |
1872,12 → 1899,12 |
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); |
// r = radeon_uvd_init(rdev); |
// if (!r) { |
// rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; |
// r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], |
// 4096); |
// } |
r = radeon_uvd_init(rdev); |
if (!r) { |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], |
4096); |
} |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
1890,6 → 1917,12 |
r = rv770_startup(rdev); |
if (r) { |
dev_err(rdev->dev, "disabling GPU acceleration\n"); |
r700_cp_fini(rdev); |
r600_dma_fini(rdev); |
r600_irq_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
rv770_pcie_gart_fini(rdev); |
rdev->accel_working = false; |
} |
1897,6 → 1930,28 |
return 0; |
} |
/* rv770_fini - tear down all rv770 subsystems: power management, CP and
 * DMA engines, IRQ handling, writeback, IB pool, UVD, PCIe GART, VRAM
 * scratch, GEM, fences, AGP, buffer objects and atombios state, then
 * drop the cached BIOS image. */
void rv770_fini(struct radeon_device *rdev) |
{ |
radeon_pm_fini(rdev); |
r700_cp_fini(rdev); |
r600_dma_fini(rdev); |
r600_irq_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
/* UVD: stop the block first, then free its driver state */
uvd_v1_0_fini(rdev); |
radeon_uvd_fini(rdev); |
rv770_pcie_gart_fini(rdev); |
r600_vram_scratch_fini(rdev); |
radeon_gem_fini(rdev); |
radeon_fence_driver_fini(rdev); |
radeon_agp_fini(rdev); |
radeon_bo_fini(rdev); |
radeon_atombios_fini(rdev); |
/* release the cached BIOS image and clear the stale pointer */
kfree(rdev->bios); |
rdev->bios = NULL; |
} |
static void rv770_pcie_gen2_enable(struct radeon_device *rdev) |
{ |
u32 link_width_cntl, lanes, speed_cntl, tmp; |
/drivers/video/drm/radeon/rv770_dpm.c |
---|
193,7 → 193,7 |
result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); |
if (result != PPSMC_Result_OK) |
DRM_ERROR("Could not force DPM to low.\n"); |
DRM_DEBUG("Could not force DPM to low.\n"); |
WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); |
231,6 → 231,7 |
MC_CG_SEQ_DRAMCONF_S0 : MC_CG_SEQ_DRAMCONF_S1; |
} |
#if 0 |
int rv770_read_smc_soft_register(struct radeon_device *rdev, |
u16 reg_offset, u32 *value) |
{ |
240,6 → 241,7 |
pi->soft_regs_start + reg_offset, |
value, pi->sram_end); |
} |
#endif |
int rv770_write_smc_soft_register(struct radeon_device *rdev, |
u16 reg_offset, u32 value) |
1416,7 → 1418,7 |
int rv770_set_sw_state(struct radeon_device *rdev) |
{ |
if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) |
return -EINVAL; |
DRM_DEBUG("rv770_set_sw_state failed\n"); |
return 0; |
} |
2075,6 → 2077,7 |
return 0; |
} |
#if 0 |
void rv770_dpm_reset_asic(struct radeon_device *rdev) |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
2087,6 → 2090,7 |
if (pi->dcodt) |
rv770_program_dcodt_after_state_switch(rdev, boot_ps, boot_ps); |
} |
#endif |
void rv770_dpm_setup_asic(struct radeon_device *rdev) |
{ |
2488,6 → 2492,50 |
} |
} |
u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
struct radeon_ps *rps = rdev->pm.dpm.current_ps; |
struct rv7xx_ps *ps = rv770_get_ps(rps); |
struct rv7xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> |
CURRENT_PROFILE_INDEX_SHIFT; |
if (current_index > 2) { |
return 0; |
} else { |
if (current_index == 0) |
pl = &ps->low; |
else if (current_index == 1) |
pl = &ps->medium; |
else /* current_index == 2 */ |
pl = &ps->high; |
return pl->sclk; |
} |
} |
u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
struct radeon_ps *rps = rdev->pm.dpm.current_ps; |
struct rv7xx_ps *ps = rv770_get_ps(rps); |
struct rv7xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >> |
CURRENT_PROFILE_INDEX_SHIFT; |
if (current_index > 2) { |
return 0; |
} else { |
if (current_index == 0) |
pl = &ps->low; |
else if (current_index == 1) |
pl = &ps->medium; |
else /* current_index == 2 */ |
pl = &ps->high; |
return pl->mclk; |
} |
} |
void rv770_dpm_fini(struct radeon_device *rdev) |
{ |
int i; |
/drivers/video/drm/radeon/rv770_dpm.h |
---|
278,8 → 278,6 |
void rv770_get_engine_memory_ss(struct radeon_device *rdev); |
/* smc */ |
int rv770_read_smc_soft_register(struct radeon_device *rdev, |
u16 reg_offset, u32 *value); |
int rv770_write_smc_soft_register(struct radeon_device *rdev, |
u16 reg_offset, u32 value); |
/drivers/video/drm/radeon/rv770d.h |
---|
989,6 → 989,9 |
((n) & 0x3FFF) << 16) |
/* UVD */ |
#define UVD_SEMA_ADDR_LOW 0xef00 |
#define UVD_SEMA_ADDR_HIGH 0xef04 |
#define UVD_SEMA_CMD 0xef08 |
#define UVD_GPCOM_VCPU_CMD 0xef0c |
#define UVD_GPCOM_VCPU_DATA0 0xef10 |
#define UVD_GPCOM_VCPU_DATA1 0xef14 |
/drivers/video/drm/radeon/si.c |
---|
27,6 → 27,7 |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include <drm/radeon_drm.h> |
#include "sid.h" |
#include "atom.h" |
1263,6 → 1264,36 |
} |
} |
/** |
* si_get_allowed_info_register - fetch the register for the info ioctl |
* |
* @rdev: radeon_device pointer |
* @reg: register offset in bytes |
* @val: register value |
* |
* Returns 0 for success or -EINVAL for an invalid register |
* |
*/ |
int si_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val) |
{ |
/* Fixed whitelist of read-only status registers that userspace may
 * query through the info ioctl; anything else is rejected. */
switch (reg) { |
case GRBM_STATUS: |
case GRBM_STATUS2: |
case GRBM_STATUS_SE0: |
case GRBM_STATUS_SE1: |
case SRBM_STATUS: |
case SRBM_STATUS2: |
case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET): |
case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET): |
case UVD_STATUS: |
/* allowed: read the register on the caller's behalf */
*val = RREG32(reg); |
return 0; |
default: |
return -EINVAL; |
} |
} |
#define PCIE_BUS_CLK 10000 |
#define TCLK (PCIE_BUS_CLK / 10) |
2345,6 → 2376,9 |
c.full = dfixed_div(c, a); |
priority_b_mark = dfixed_trunc(c); |
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; |
/* Save number of lines the linebuffer leads before the scanout */ |
radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); |
} |
/* select wm A */ |
3161,6 → 3195,8 |
} |
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
WREG32(SRBM_INT_CNTL, 1); |
WREG32(SRBM_INT_ACK, 1); |
evergreen_fix_pci_max_read_req_size(rdev); |
4285,7 → 4321,7 |
/* empty context1-15 */ |
/* set vm size, must be a multiple of 4 */ |
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); |
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); |
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); |
/* Assign the pt base to something valid for now; the pts used for |
* the VMs are determined by the application and setup and assigned |
* on the fly in the vm part of radeon_gart.c |
4360,7 → 4396,7 |
{ |
si_pcie_gart_disable(rdev); |
radeon_gart_table_vram_free(rdev); |
// radeon_gart_fini(rdev); |
radeon_gart_fini(rdev); |
} |
/* vm parser */ |
4698,12 → 4734,6 |
switch (pkt.type) { |
case RADEON_PACKET_TYPE0: |
dev_err(rdev->dev, "Packet0 not allowed!\n"); |
for (i = 0; i < ib->length_dw; i++) { |
if (i == idx) |
printk("\t0x%08x <---\n", ib->ptr[i]); |
else |
printk("\t0x%08x\n", ib->ptr[i]); |
} |
ret = -EINVAL; |
break; |
case RADEON_PACKET_TYPE2: |
4735,8 → 4765,15 |
ret = -EINVAL; |
break; |
} |
if (ret) |
if (ret) { |
for (i = 0; i < ib->length_dw; i++) { |
if (i == idx) |
printk("\t0x%08x <---\n", ib->ptr[i]); |
else |
printk("\t0x%08x\n", ib->ptr[i]); |
} |
break; |
} |
} while (idx < ib->length_dw); |
return ret; |
5057,6 → 5094,16 |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 1 << vm_id); |
/* wait for the invalidate to complete */ |
radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); |
radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ |
WAIT_REG_MEM_ENGINE(0))); /* me */ |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 0); /* ref */ |
radeon_ring_write(ring, 0); /* mask */ |
radeon_ring_write(ring, 0x20); /* poll interval */ |
/* sync PFP to ME, otherwise we might get invalid PFP reads */ |
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
radeon_ring_write(ring, 0x0); |
5899,6 → 5946,7 |
tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; |
WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); |
WREG32(GRBM_INT_CNTL, 0); |
WREG32(SRBM_INT_CNTL, 0); |
if (rdev->num_crtc >= 2) { |
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
6040,12 → 6088,12 |
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
if (!ASIC_IS_NODCE(rdev)) { |
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
} |
dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; |
6108,27 → 6156,27 |
} |
if (rdev->irq.hpd[0]) { |
DRM_DEBUG("si_irq_set: hpd 1\n"); |
hpd1 |= DC_HPDx_INT_EN; |
hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[1]) { |
DRM_DEBUG("si_irq_set: hpd 2\n"); |
hpd2 |= DC_HPDx_INT_EN; |
hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[2]) { |
DRM_DEBUG("si_irq_set: hpd 3\n"); |
hpd3 |= DC_HPDx_INT_EN; |
hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[3]) { |
DRM_DEBUG("si_irq_set: hpd 4\n"); |
hpd4 |= DC_HPDx_INT_EN; |
hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[4]) { |
DRM_DEBUG("si_irq_set: hpd 5\n"); |
hpd5 |= DC_HPDx_INT_EN; |
hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[5]) { |
DRM_DEBUG("si_irq_set: hpd 6\n"); |
hpd6 |= DC_HPDx_INT_EN; |
hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
WREG32(CP_INT_CNTL_RING0, cp_int_cntl); |
6188,6 → 6236,9 |
WREG32(CG_THERMAL_INT, thermal_int); |
/* posting read */ |
RREG32(SRBM_STATUS); |
return 0; |
} |
6288,7 → 6339,38 |
tmp |= DC_HPDx_INT_ACK; |
WREG32(DC_HPD6_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD1_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD1_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD2_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD2_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD3_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD3_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD4_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD4_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD5_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD5_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
	/* Ack the HPD6 RX interrupt.  BUGFIX: this used to read
	 * DC_HPD5_INT_CONTROL (copy/paste typo from the HPD5 branch
	 * above) while writing DC_HPD6_INT_CONTROL, clobbering HPD6's
	 * control bits with HPD5's.  Read-modify-write must target the
	 * same register. */
	tmp = RREG32(DC_HPD6_INT_CONTROL);
	tmp |= DC_HPDx_RX_INT_ACK;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
} |
static void si_irq_disable(struct radeon_device *rdev) |
{ |
6353,6 → 6435,7 |
u32 src_id, src_data, ring_id; |
u32 ring_index; |
bool queue_hotplug = false; |
bool queue_dp = false; |
bool queue_thermal = false; |
u32 status, addr; |
6386,23 → 6469,27 |
case 1: /* D1 vblank/vline */ |
switch (src_data) { |
case 0: /* D1 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[0]) { |
// drm_handle_vblank(rdev->ddev, 0); |
drm_handle_vblank(rdev->ddev, 0); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[0])) |
// radeon_crtc_handle_flip(rdev, 0); |
if (atomic_read(&rdev->irq.pflip[0])) |
radeon_crtc_handle_vblank(rdev, 0); |
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D1 vblank\n"); |
} |
break; |
case 1: /* D1 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D1 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6412,23 → 6499,27 |
case 2: /* D2 vblank/vline */ |
switch (src_data) { |
case 0: /* D2 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[1]) { |
// drm_handle_vblank(rdev->ddev, 1); |
drm_handle_vblank(rdev->ddev, 1); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[1])) |
// radeon_crtc_handle_flip(rdev, 1); |
if (atomic_read(&rdev->irq.pflip[1])) |
radeon_crtc_handle_vblank(rdev, 1); |
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D2 vblank\n"); |
} |
break; |
case 1: /* D2 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D2 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6438,23 → 6529,27 |
case 3: /* D3 vblank/vline */ |
switch (src_data) { |
case 0: /* D3 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[2]) { |
// drm_handle_vblank(rdev->ddev, 2); |
drm_handle_vblank(rdev->ddev, 2); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[2])) |
// radeon_crtc_handle_flip(rdev, 2); |
if (atomic_read(&rdev->irq.pflip[2])) |
radeon_crtc_handle_vblank(rdev, 2); |
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D3 vblank\n"); |
} |
break; |
case 1: /* D3 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D3 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6464,23 → 6559,27 |
case 4: /* D4 vblank/vline */ |
switch (src_data) { |
case 0: /* D4 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[3]) { |
// drm_handle_vblank(rdev->ddev, 3); |
drm_handle_vblank(rdev->ddev, 3); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[3])) |
// radeon_crtc_handle_flip(rdev, 3); |
if (atomic_read(&rdev->irq.pflip[3])) |
radeon_crtc_handle_vblank(rdev, 3); |
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D4 vblank\n"); |
} |
break; |
case 1: /* D4 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D4 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6490,23 → 6589,27 |
case 5: /* D5 vblank/vline */ |
switch (src_data) { |
case 0: /* D5 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[4]) { |
// drm_handle_vblank(rdev->ddev, 4); |
drm_handle_vblank(rdev->ddev, 4); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[4])) |
// radeon_crtc_handle_flip(rdev, 4); |
if (atomic_read(&rdev->irq.pflip[4])) |
radeon_crtc_handle_vblank(rdev, 4); |
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D5 vblank\n"); |
} |
break; |
case 1: /* D5 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D5 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6516,23 → 6619,27 |
case 6: /* D6 vblank/vline */ |
switch (src_data) { |
case 0: /* D6 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
if (rdev->irq.crtc_vblank_int[5]) { |
// drm_handle_vblank(rdev->ddev, 5); |
drm_handle_vblank(rdev->ddev, 5); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[5])) |
// radeon_crtc_handle_flip(rdev, 5); |
if (atomic_read(&rdev->irq.pflip[5])) |
radeon_crtc_handle_vblank(rdev, 5); |
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D6 vblank\n"); |
} |
break; |
case 1: /* D6 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D6 vline\n"); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6550,52 → 6657,122 |
case 42: /* HPD hotplug */ |
switch (src_data) { |
case 0: |
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD1\n"); |
} |
break; |
case 1: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD2\n"); |
} |
break; |
case 2: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD3\n"); |
} |
break; |
case 3: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD4\n"); |
} |
break; |
case 4: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD5\n"); |
} |
break; |
case 5: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD6\n"); |
} |
break; |
case 6: |
if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 1\n"); |
break; |
case 7: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 2\n"); |
break; |
case 8: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 3\n"); |
break; |
case 9: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 4\n"); |
break; |
case 10: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 5\n"); |
break; |
case 11: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 6\n"); |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
break; |
} |
break; |
case 96: |
DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); |
WREG32(SRBM_INT_ACK, 0x1); |
break; |
case 124: /* UVD */ |
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
6775,6 → 6952,22 |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
} |
r = radeon_vce_resume(rdev); |
if (!r) { |
r = vce_v1_0_resume(rdev); |
if (!r) |
r = radeon_fence_driver_start_ring(rdev, |
TN_RING_TYPE_VCE1_INDEX); |
if (!r) |
r = radeon_fence_driver_start_ring(rdev, |
TN_RING_TYPE_VCE2_INDEX); |
} |
if (r) { |
dev_err(rdev->dev, "VCE init error (%d).\n", r); |
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; |
rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0; |
} |
/* Enable IRQ */ |
if (!rdev->irq.installed) { |
r = radeon_irq_kms_init(rdev); |
6843,6 → 7036,23 |
} |
} |
r = -ENOENT; |
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; |
if (ring->ring_size) |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
VCE_CMD_NO_OP); |
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; |
if (ring->ring_size) |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
VCE_CMD_NO_OP); |
if (!r) |
r = vce_v1_0_init(rdev); |
else if (r != -ENOENT) |
DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
6855,7 → 7065,7 |
return r; |
} |
r = dce6_audio_init(rdev); |
r = radeon_audio_init(rdev); |
if (r) |
return r; |
6862,9 → 7072,35 |
return 0; |
} |
/**
 * si_resume - resume the asic to a functional state (SI)
 * @rdev: radeon_device pointer
 *
 * Re-posts the card via the ATOM BIOS, restores the golden register
 * settings, resumes dpm if in use, and brings all engines back up
 * through si_startup().
 *
 * Returns 0 on success, negative error code on failure.
 */
int si_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	/* init golden registers */
	si_init_golden_registers(rdev);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	/* must be set before si_startup(); cleared again if startup fails */
	rdev->accel_working = true;
	r = si_startup(rdev);
	if (r) {
		DRM_ERROR("si startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}
/* Plan is to move initialization in that function and use |
* helper function so that radeon_device_init pretty much |
* do nothing more than calling asic specific function. This |
6963,6 → 7199,17 |
} |
} |
r = radeon_vce_init(rdev); |
if (!r) { |
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
} |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
6975,13 → 7222,14 |
if (r) { |
dev_err(rdev->dev, "disabling GPU acceleration\n"); |
si_cp_fini(rdev); |
// si_irq_fini(rdev); |
// si_rlc_fini(rdev); |
// radeon_wb_fini(rdev); |
// radeon_ib_pool_fini(rdev); |
// radeon_vm_manager_fini(rdev); |
// radeon_irq_kms_fini(rdev); |
// si_pcie_gart_fini(rdev); |
cayman_dma_fini(rdev); |
si_irq_fini(rdev); |
sumo_rlc_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_vm_manager_fini(rdev); |
radeon_irq_kms_fini(rdev); |
si_pcie_gart_fini(rdev); |
rdev->accel_working = false; |
} |
6997,6 → 7245,34 |
return 0; |
} |
/**
 * si_fini - tear down the SI asic
 * @rdev: radeon_device pointer
 *
 * Shuts down power management, the CP and DMA rings, powergating and
 * clockgating, interrupts, the RLC, writeback, the VM manager, the IB
 * pool, UVD/VCE, the GART and buffer management, then releases the
 * ATOM BIOS context and frees the cached BIOS image.  Mirrors the
 * corresponding init path in reverse order.
 */
void si_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	si_cp_fini(rdev);
	cayman_dma_fini(rdev);
	si_fini_pg(rdev);
	si_fini_cg(rdev);
	si_irq_fini(rdev);
	sumo_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	if (rdev->has_uvd) {
		/* NOTE(review): VCE teardown is gated on has_uvd rather
		 * than a dedicated VCE flag — confirm intentional */
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
		radeon_vce_fini(rdev);
	}
	si_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
/** |
* si_get_gpu_clock_counter - return GPU clock counter snapshot |
* |
7031,8 → 7307,7 |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); |
if (!vclk || !dclk) { |
/* keep the Bypass mode, put PLL to sleep */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); |
/* keep the Bypass mode */ |
return 0; |
} |
7048,8 → 7323,7 |
/* set VCO_MODE to 1 */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); |
/* toggle UPLL_SLEEP to 1 then back to 0 */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); |
/* disable sleep mode */ |
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); |
/* deassert UPLL_RESET */ |
7468,3 → 7742,124 |
} |
} |
} |
int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev) |
{ |
unsigned i; |
/* make sure VCEPLL_CTLREQ is deasserted */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); |
mdelay(10); |
/* assert UPLL_CTLREQ */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); |
/* wait for CTLACK and CTLACK2 to get asserted */ |
for (i = 0; i < 100; ++i) { |
uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; |
if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask) |
break; |
mdelay(10); |
} |
/* deassert UPLL_CTLREQ */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); |
if (i == 100) { |
DRM_ERROR("Timeout setting UVD clocks!\n"); |
return -ETIMEDOUT; |
} |
return 0; |
} |
/**
 * si_set_vce_clocks - program the VCE PLL for the requested clocks
 * @rdev: radeon_device pointer
 * @evclk: requested VCE engine clock
 * @ecclk: requested VCE core clock
 *
 * Switches EVCLK/ECCLK onto the bypass clock, recomputes the PLL
 * dividers, walks the PLL through its reset/settle sequence and
 * finally muxes EVCLK/ECCLK back onto the PLL output.  Passing 0 for
 * either clock leaves the PLL asleep in bypass mode.
 *
 * Returns 0 on success, negative error code on failure.
 */
int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
{
	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
	int r;

	/* bypass evclk and ecclk with bclk */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));

	/* put PLL in bypass mode */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
		     ~VCEPLL_BYPASS_EN_MASK);

	if (!evclk || !ecclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
			     ~VCEPLL_SLEEP_MASK);
		return 0;
	}

	/* fb_div range 0..0x03FFFFFF, post dividers 5..128 */
	r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &evclk_div, &ecclk_div);
	if (r)
		return r;

	/* set RESET_ANTI_MUX to 0 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);

	/* set VCO_MODE to 1 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
		     ~VCEPLL_VCO_MODE_MASK);

	/* toggle VCEPLL_SLEEP to 1 then back to 0 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
		     ~VCEPLL_SLEEP_MASK);
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);

	/* deassert VCEPLL_RESET */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);

	mdelay(1);

	r = si_vce_send_vcepll_ctlreq(rdev);
	if (r)
		return r;

	/* assert VCEPLL_RESET again */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);

	/* disable spread spectrum. */
	/* NOTE(review): header defines VCEPLL_SSEN_MASK for this register;
	 * SSEN_MASK presumably comes from the UPLL defines — confirm */
	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);

	/* set feedback divider */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK);

	/* set ref divider to 0 */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);

	/* set PDIV_A and PDIV_B */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);

	mdelay(15);

	/* switch from bypass mode to normal mode */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);

	r = si_vce_send_vcepll_ctlreq(rdev);
	if (r)
		return r;

	/* switch EVCLK and ECCLK source selection onto the PLL output */
	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
/drivers/video/drm/radeon/si_dma.c |
---|
123,7 → 123,6 |
for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
if (flags & R600_PTE_SYSTEM) { |
value = radeon_vm_map_gart(rdev, addr); |
value &= 0xFFFFFFFFFFFFF000ULL; |
} else if (flags & R600_PTE_VALID) { |
value = addr; |
} else { |
206,6 → 205,14 |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); |
radeon_ring_write(ring, 1 << vm_id); |
/* wait for invalidate to complete */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST); |
radeon_ring_write(ring, 0xff << 16); /* retry */ |
radeon_ring_write(ring, 1 << vm_id); /* mask */ |
radeon_ring_write(ring, 0); /* value */ |
radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ |
} |
/** |
/drivers/video/drm/radeon/si_dpm.c |
---|
1740,6 → 1740,7 |
struct ni_ps *ni_get_ps(struct radeon_ps *rps); |
extern int si_mc_load_microcode(struct radeon_device *rdev); |
extern void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable); |
static int si_populate_voltage_value(struct radeon_device *rdev, |
const struct atom_voltage_table *table, |
1756,6 → 1757,9 |
u32 engine_clock, |
SISLANDS_SMC_SCLK_VALUE *sclk); |
static void si_thermal_start_smc_fan_control(struct radeon_device *rdev); |
static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev); |
static struct si_power_info *si_get_pi(struct radeon_device *rdev) |
{ |
struct si_power_info *pi = rdev->pm.dpm.priv; |
2908,6 → 2912,76 |
return ret; |
} |
/* PCI identity of a board with dpm stability problems, plus the clock
 * caps to apply to it; a cap of 0 means "no limit". */
struct si_dpm_quirk {
	u32 chip_vendor;	/* GPU PCI vendor id */
	u32 chip_device;	/* GPU PCI device id */
	u32 subsys_vendor;	/* board subsystem vendor id */
	u32 subsys_device;	/* board subsystem device id */
	u32 max_sclk;		/* engine clock cap, 0 = uncapped */
	u32 max_mclk;		/* memory clock cap, 0 = uncapped */
};
/* cards with dpm stability problems */
static struct si_dpm_quirk si_dpm_quirk_list[] = {
	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
	/* terminator: lookup loops stop when chip_device == 0 */
	{ 0, 0, 0, 0 },
};
/* Clamp a requested VCE voltage to the board's highest recorded
 * leakage voltage.  Returns @vce_voltage unchanged when no leakage
 * entries exist or the request is already at or below the highest
 * leakage entry. */
static u16 si_get_lower_of_leakage_and_vce_voltage(struct radeon_device *rdev,
						   u16 vce_voltage)
{
	struct si_power_info *si_pi = si_get_pi(rdev);
	u16 max_leakage = 0;
	int idx;

	for (idx = 0; idx < si_pi->leakage_voltage.count; idx++) {
		u16 entry_v = si_pi->leakage_voltage.entries[idx].voltage;

		if (entry_v > max_leakage)
			max_leakage = entry_v;
	}

	if (si_pi->leakage_voltage.count == 0)
		return vce_voltage;

	return (max_leakage < vce_voltage) ? max_leakage : vce_voltage;
}
/* Look up the minimum voltage needed for the requested VCE clocks in
 * the pplib dependency table, then clamp it against the leakage
 * voltage.  Writes the result through @voltage.
 *
 * Returns 0 when some table entry covers the request (or both clocks
 * are zero); -EINVAL when the request exceeds every entry, in which
 * case the highest table voltage is still reported. */
static int si_get_vce_clock_voltage(struct radeon_device *rdev,
				    u32 evclk, u32 ecclk, u16 *voltage)
{
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret = -EINVAL;
	u32 i;

	/* VCE idle, or nothing to look up */
	if ((evclk == 0 && ecclk == 0) || (table && table->count == 0)) {
		*voltage = 0;
		return 0;
	}

	for (i = 0; i < table->count; i++) {
		if (evclk <= table->entries[i].evclk &&
		    ecclk <= table->entries[i].ecclk) {
			*voltage = table->entries[i].v;
			ret = 0;
			break;
		}
	}

	/* if no match return the highest voltage */
	if (ret)
		*voltage = table->entries[table->count - 1].v;

	*voltage = si_get_lower_of_leakage_and_vce_voltage(rdev, *voltage);

	return ret;
}
static void si_apply_state_adjust_rules(struct radeon_device *rdev, |
struct radeon_ps *rps) |
{ |
2916,10 → 2990,35 |
bool disable_mclk_switching = false; |
bool disable_sclk_switching = false; |
u32 mclk, sclk; |
u16 vddc, vddci; |
u16 vddc, vddci, min_vce_voltage = 0; |
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; |
u32 max_sclk = 0, max_mclk = 0; |
int i; |
struct si_dpm_quirk *p = si_dpm_quirk_list; |
/* Apply dpm quirks */ |
while (p && p->chip_device != 0) { |
if (rdev->pdev->vendor == p->chip_vendor && |
rdev->pdev->device == p->chip_device && |
rdev->pdev->subsystem_vendor == p->subsys_vendor && |
rdev->pdev->subsystem_device == p->subsys_device) { |
max_sclk = p->max_sclk; |
max_mclk = p->max_mclk; |
break; |
} |
++p; |
} |
if (rps->vce_active) { |
rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; |
rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; |
si_get_vce_clock_voltage(rdev, rps->evclk, rps->ecclk, |
&min_vce_voltage); |
} else { |
rps->evclk = 0; |
rps->ecclk = 0; |
} |
if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
ni_dpm_vblank_too_short(rdev)) |
disable_mclk_switching = true; |
2972,7 → 3071,15 |
if (ps->performance_levels[i].mclk > max_mclk_vddc) |
ps->performance_levels[i].mclk = max_mclk_vddc; |
} |
if (max_mclk) { |
if (ps->performance_levels[i].mclk > max_mclk) |
ps->performance_levels[i].mclk = max_mclk; |
} |
if (max_sclk) { |
if (ps->performance_levels[i].sclk > max_sclk) |
ps->performance_levels[i].sclk = max_sclk; |
} |
} |
/* XXX validate the min clocks required for display */ |
2992,6 → 3099,13 |
vddc = ps->performance_levels[0].vddc; |
} |
if (rps->vce_active) { |
if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) |
sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; |
if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk) |
mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk; |
} |
/* adjusted low state */ |
ps->performance_levels[0].sclk = sclk; |
ps->performance_levels[0].mclk = mclk; |
3041,6 → 3155,8 |
&ps->performance_levels[i]); |
for (i = 0; i < ps->performance_level_count; i++) { |
if (ps->performance_levels[i].vddc < min_vce_voltage) |
ps->performance_levels[i].vddc = min_vce_voltage; |
btc_apply_voltage_dependency_rules(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk, |
ps->performance_levels[i].sclk, |
max_limits->vddc, &ps->performance_levels[i].vddc); |
3067,7 → 3183,6 |
if (ps->performance_levels[i].vddc > rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) |
ps->dc_compatible = false; |
} |
} |
#if 0 |
3320,11 → 3435,13 |
return 0; |
} |
#if 0 |
/* Ask the SMC to switch back to the initial (boot) power state.
 * Currently compiled out by the surrounding #if 0 — kept for reference. */
static int si_set_boot_state(struct radeon_device *rdev)
{
	return (si_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
#endif |
static int si_set_sw_state(struct radeon_device *rdev) |
{ |
5814,6 → 5931,21 |
} |
} |
/**
 * si_set_vce_clock - update the VCE clocks across a power state change
 * @rdev: radeon_device pointer
 * @new_rps: the power state being switched to
 * @old_rps: the power state being switched from
 *
 * If the requested VCE clocks changed, disable VCE medium-grain clock
 * gating while encoding (non-zero clocks), re-enable it when VCE goes
 * idle, and program the new clocks.
 */
static void si_set_vce_clock(struct radeon_device *rdev,
			     struct radeon_ps *new_rps,
			     struct radeon_ps *old_rps)
{
	if ((old_rps->evclk != new_rps->evclk) ||
	    (old_rps->ecclk != new_rps->ecclk)) {
		/* turn the clocks on when encoding, off otherwise */
		if (new_rps->evclk || new_rps->ecclk)
			vce_v1_0_enable_mgcg(rdev, false);
		else
			vce_v1_0_enable_mgcg(rdev, true);
		radeon_set_vce_clocks(rdev, new_rps->evclk, new_rps->ecclk);
	}
}
void si_dpm_setup_asic(struct radeon_device *rdev) |
{ |
int r; |
5934,6 → 6066,10 |
slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); |
slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); |
fan_table.temp_min = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100); |
fan_table.temp_med = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100); |
fan_table.temp_max = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100); |
fan_table.slope1 = cpu_to_be16(slope1); |
fan_table.slope2 = cpu_to_be16(slope2); |
5973,28 → 6109,34 |
static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev) |
{ |
struct si_power_info *si_pi = si_get_pi(rdev); |
PPSMC_Result ret; |
ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl); |
if (ret == PPSMC_Result_OK) |
if (ret == PPSMC_Result_OK) { |
si_pi->fan_is_controlled_by_smc = true; |
return 0; |
else |
} else { |
return -EINVAL; |
} |
} |
static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev) |
{ |
struct si_power_info *si_pi = si_get_pi(rdev); |
PPSMC_Result ret; |
ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl); |
if (ret == PPSMC_Result_OK) |
if (ret == PPSMC_Result_OK) { |
si_pi->fan_is_controlled_by_smc = false; |
return 0; |
else |
} else { |
return -EINVAL; |
} |
} |
#if 0 |
static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, |
int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev, |
u32 *speed) |
{ |
u32 duty, duty100; |
6019,9 → 6161,10 |
return 0; |
} |
static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, |
int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev, |
u32 speed) |
{ |
struct si_power_info *si_pi = si_get_pi(rdev); |
u32 tmp; |
u32 duty, duty100; |
u64 tmp64; |
6029,12 → 6172,12 |
if (rdev->pm.no_fan) |
return -ENOENT; |
if (si_pi->fan_is_controlled_by_smc) |
return -EINVAL; |
if (speed > 100) |
return -EINVAL; |
if (rdev->pm.dpm.fan.ucode_fan_control) |
si_fan_ctrl_stop_smc_fan_control(rdev); |
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; |
if (duty100 == 0) |
6048,11 → 6191,38 |
tmp |= FDO_STATIC_DUTY(duty); |
WREG32(CG_FDO_CTRL0, tmp); |
si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC); |
return 0; |
} |
/**
 * si_fan_ctrl_set_mode - select manual or automatic fan control
 * @rdev: radeon_device pointer
 * @mode: FDO PWM mode; non-zero selects a static (manual) mode,
 *        0 hands control back to the SMC or the hardware default
 */
void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			si_fan_ctrl_stop_smc_fan_control(rdev);
		si_fan_ctrl_set_static_mode(rdev, mode);
	} else {
		/* restart auto-manage */
		if (rdev->pm.dpm.fan.ucode_fan_control)
			si_thermal_start_smc_fan_control(rdev);
		else
			si_fan_ctrl_set_default_mode(rdev);
	}
}
u32 si_fan_ctrl_get_mode(struct radeon_device *rdev) |
{ |
struct si_power_info *si_pi = si_get_pi(rdev); |
u32 tmp; |
if (si_pi->fan_is_controlled_by_smc) |
return 0; |
tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; |
return (tmp >> FDO_PWM_MODE_SHIFT); |
} |
#if 0 |
static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev, |
u32 *speed) |
{ |
6464,6 → 6634,7 |
return ret; |
} |
ni_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); |
si_set_vce_clock(rdev, new_ps, old_ps); |
if (eg_pi->pcie_performance_request) |
si_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps); |
ret = si_set_power_state_conditionally_enable_ulv(rdev, new_ps); |
6499,7 → 6670,7 |
ni_update_current_ps(rdev, new_ps); |
} |
#if 0 |
void si_dpm_reset_asic(struct radeon_device *rdev) |
{ |
si_restrict_performance_levels_before_switch(rdev); |
6506,6 → 6677,7 |
si_disable_ulv(rdev); |
si_set_boot_state(rdev); |
} |
#endif |
void si_dpm_display_configuration_changed(struct radeon_device *rdev) |
{ |
6709,6 → 6881,21 |
power_state_offset += 2 + power_state->v2.ucNumDPMLevels; |
} |
rdev->pm.dpm.num_ps = state_array->ucNumEntries; |
/* fill in the vce power states */ |
for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { |
u32 sclk, mclk; |
clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx; |
clock_info = (union pplib_clock_info *) |
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; |
sclk = le16_to_cpu(clock_info->si.usEngineClockLow); |
sclk |= clock_info->si.ucEngineClockHigh << 16; |
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); |
mclk |= clock_info->si.ucMemoryClockHigh << 16; |
rdev->pm.dpm.vce_states[i].sclk = sclk; |
rdev->pm.dpm.vce_states[i].mclk = mclk; |
} |
return 0; |
} |
6753,10 → 6940,11 |
if (ret) |
return ret; |
ret = si_parse_power_table(rdev); |
ret = r600_parse_extended_power_table(rdev); |
if (ret) |
return ret; |
ret = r600_parse_extended_power_table(rdev); |
ret = si_parse_power_table(rdev); |
if (ret) |
return ret; |
6873,7 → 7061,6 |
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
si_pi->fan_ctrl_is_in_default_mode = true; |
rdev->pm.dpm.fan.ucode_fan_control = false; |
return 0; |
} |
6911,3 → 7098,39 |
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); |
} |
} |
u32 si_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct radeon_ps *rps = &eg_pi->current_rps; |
struct ni_ps *ps = ni_get_ps(rps); |
struct rv7xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> |
CURRENT_STATE_INDEX_SHIFT; |
if (current_index >= ps->performance_level_count) { |
return 0; |
} else { |
pl = &ps->performance_levels[current_index]; |
return pl->sclk; |
} |
} |
u32 si_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct radeon_ps *rps = &eg_pi->current_rps; |
struct ni_ps *ps = ni_get_ps(rps); |
struct rv7xx_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> |
CURRENT_STATE_INDEX_SHIFT; |
if (current_index >= ps->performance_level_count) { |
return 0; |
} else { |
pl = &ps->performance_levels[current_index]; |
return pl->mclk; |
} |
} |
/drivers/video/drm/radeon/si_dpm.h |
---|
202,6 → 202,7 |
bool fan_ctrl_is_in_default_mode; |
u32 t_min; |
u32 fan_ctrl_default_mode; |
bool fan_is_controlled_by_smc; |
}; |
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0 |
/drivers/video/drm/radeon/sid.h |
---|
358,6 → 358,10 |
#define CC_SYS_RB_BACKEND_DISABLE 0xe80 |
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 |
#define SRBM_READ_ERROR 0xE98 |
#define SRBM_INT_CNTL 0xEA0 |
#define SRBM_INT_ACK 0xEA8 |
#define SRBM_STATUS2 0x0EC4 |
#define DMA_BUSY (1 << 5) |
#define DMA1_BUSY (1 << 6) |
901,6 → 905,16 |
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */ |
#define CRTC_STATUS_FRAME_COUNT 0x6e98 |
/* Audio clocks */ |
#define DCCG_AUDIO_DTO_SOURCE 0x05ac |
# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */ |
# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */ |
#define DCCG_AUDIO_DTO0_PHASE 0x05b0 |
#define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
#define DCCG_AUDIO_DTO1_PHASE 0x05c0 |
#define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
#define AFMT_AUDIO_SRC_CONTROL 0x713c |
#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0) |
/* AFMT_AUDIO_SRC_SELECT |
1542,6 → 1556,7 |
#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54 |
#define UVD_RBC_RB_RPTR 0xF690 |
#define UVD_RBC_RB_WPTR 0xF694 |
#define UVD_STATUS 0xf6bc |
#define UVD_CGC_CTRL 0xF4B0 |
# define DCM (1 << 0) |
1632,6 → 1647,23 |
#define PACKET3_MPEG_INDEX 0x3A |
#define PACKET3_COPY_DW 0x3B |
#define PACKET3_WAIT_REG_MEM 0x3C |
#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) |
/* 0 - always |
* 1 - < |
* 2 - <= |
* 3 - == |
* 4 - != |
* 5 - >= |
* 6 - > |
*/ |
#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) |
/* 0 - reg |
* 1 - mem |
*/ |
#define WAIT_REG_MEM_ENGINE(x) ((x) << 8) |
/* 0 - me |
* 1 - pfp |
*/ |
#define PACKET3_MEM_WRITE 0x3D |
#define PACKET3_COPY_DATA 0x40 |
#define PACKET3_CP_DMA 0x41 |
1835,6 → 1867,7 |
#define DMA_PACKET_TRAP 0x7 |
#define DMA_PACKET_SRBM_WRITE 0x9 |
#define DMA_PACKET_CONSTANT_FILL 0xd |
#define DMA_PACKET_POLL_REG_MEM 0xe |
#define DMA_PACKET_NOP 0xf |
#define VCE_STATUS 0x20004 |
1846,6 → 1879,7 |
#define VCE_VCPU_CACHE_SIZE1 0x20030 |
#define VCE_VCPU_CACHE_OFFSET2 0x20034 |
#define VCE_VCPU_CACHE_SIZE2 0x20038 |
#define VCE_VCPU_SCRATCH7 0x200dc |
#define VCE_SOFT_RESET 0x20120 |
#define VCE_ECPU_SOFT_RESET (1 << 0) |
#define VCE_FME_SOFT_RESET (1 << 2) |
1860,6 → 1894,7 |
#define VCE_RB_RPTR 0x2018c |
#define VCE_RB_WPTR 0x20190 |
#define VCE_CLOCK_GATING_A 0x202f8 |
# define CGC_DYN_CLOCK_MODE (1 << 16) |
#define VCE_CLOCK_GATING_B 0x202fc |
#define VCE_UENC_CLOCK_GATING 0x205bc |
#define VCE_UENC_REG_CLOCK_GATING 0x205c0 |
1884,4 → 1919,31 |
#define VCE_CMD_IB_AUTO 0x00000005 |
#define VCE_CMD_SEMAPHORE 0x00000006 |
/* discrete vce clocks */ |
#define CG_VCEPLL_FUNC_CNTL 0xc0030600 |
# define VCEPLL_RESET_MASK 0x00000001 |
# define VCEPLL_SLEEP_MASK 0x00000002 |
# define VCEPLL_BYPASS_EN_MASK 0x00000004 |
# define VCEPLL_CTLREQ_MASK 0x00000008 |
# define VCEPLL_VCO_MODE_MASK 0x00000600 |
# define VCEPLL_REF_DIV_MASK 0x003F0000 |
# define VCEPLL_CTLACK_MASK 0x40000000 |
# define VCEPLL_CTLACK2_MASK 0x80000000 |
#define CG_VCEPLL_FUNC_CNTL_2 0xc0030601 |
# define VCEPLL_PDIV_A(x) ((x) << 0) |
# define VCEPLL_PDIV_A_MASK 0x0000007F |
# define VCEPLL_PDIV_B(x) ((x) << 8) |
# define VCEPLL_PDIV_B_MASK 0x00007F00 |
# define EVCLK_SRC_SEL(x) ((x) << 20) |
# define EVCLK_SRC_SEL_MASK 0x01F00000 |
# define ECCLK_SRC_SEL(x) ((x) << 25) |
# define ECCLK_SRC_SEL_MASK 0x3E000000 |
#define CG_VCEPLL_FUNC_CNTL_3 0xc0030602 |
# define VCEPLL_FB_DIV(x) ((x) << 0) |
# define VCEPLL_FB_DIV_MASK 0x01FFFFFF |
#define CG_VCEPLL_FUNC_CNTL_4 0xc0030603 |
#define CG_VCEPLL_FUNC_CNTL_5 0xc0030604 |
#define CG_VCEPLL_SPREAD_SPECTRUM 0xc0030606 |
# define VCEPLL_SSEN_MASK 0x00000001 |
#endif |
/drivers/video/drm/radeon/sumo_dpm.c |
---|
1338,6 → 1338,7 |
sumo_update_current_ps(rdev, new_ps); |
} |
#if 0 |
void sumo_dpm_reset_asic(struct radeon_device *rdev) |
{ |
sumo_program_bootup_state(rdev); |
1349,6 → 1350,7 |
sumo_set_forced_mode_enabled(rdev); |
sumo_set_forced_mode_disabled(rdev); |
} |
#endif |
void sumo_dpm_setup_asic(struct radeon_device *rdev) |
{ |
1537,6 → 1539,7 |
return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; |
} |
#if 0 |
u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev, |
struct sumo_vid_mapping_table *vid_mapping_table, |
u32 vid_7bit) |
1550,6 → 1553,7 |
return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; |
} |
#endif |
static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev, |
u32 vid_2bit) |
1833,6 → 1837,34 |
} |
} |
u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
struct sumo_power_info *pi = sumo_get_pi(rdev); |
struct radeon_ps *rps = &pi->current_rps; |
struct sumo_ps *ps = sumo_get_ps(rps); |
struct sumo_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_INDEX_MASK) >> |
CURR_INDEX_SHIFT; |
if (current_index == BOOST_DPM_LEVEL) { |
pl = &pi->boost_pl; |
return pl->sclk; |
} else if (current_index >= ps->num_levels) { |
return 0; |
} else { |
pl = &ps->levels[current_index]; |
return pl->sclk; |
} |
} |
u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
struct sumo_power_info *pi = sumo_get_pi(rdev); |
return pi->sys_info.bootup_uma_clk; |
} |
void sumo_dpm_fini(struct radeon_device *rdev) |
{ |
int i; |
/drivers/video/drm/radeon/sumo_dpm.h |
---|
202,9 → 202,6 |
u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev, |
struct sumo_vid_mapping_table *vid_mapping_table, |
u32 vid_2bit); |
u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev, |
struct sumo_vid_mapping_table *vid_mapping_table, |
u32 vid_7bit); |
u32 sumo_get_sleep_divider_from_id(u32 id); |
u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev, |
u32 sclk, |
/drivers/video/drm/radeon/trinity_dpm.c |
---|
336,6 → 336,7 |
0x00000204, 0x00000000, |
}; |
extern void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable); |
static void trinity_program_clk_gating_hw_sequence(struct radeon_device *rdev, |
const u32 *seq, u32 count); |
static void trinity_override_dynamic_mg_powergating(struct radeon_device *rdev); |
985,6 → 986,21 |
trinity_setup_uvd_clocks(rdev, new_rps, old_rps); |
} |
static void trinity_set_vce_clock(struct radeon_device *rdev, |
struct radeon_ps *new_rps, |
struct radeon_ps *old_rps) |
{ |
if ((old_rps->evclk != new_rps->evclk) || |
(old_rps->ecclk != new_rps->ecclk)) { |
/* turn the clocks on when encoding, off otherwise */ |
if (new_rps->evclk || new_rps->ecclk) |
vce_v1_0_enable_mgcg(rdev, false); |
else |
vce_v1_0_enable_mgcg(rdev, true); |
radeon_set_vce_clocks(rdev, new_rps->evclk, new_rps->ecclk); |
} |
} |
static void trinity_program_ttt(struct radeon_device *rdev) |
{ |
struct trinity_power_info *pi = trinity_get_pi(rdev); |
1246,6 → 1262,7 |
trinity_force_level_0(rdev); |
trinity_unforce_levels(rdev); |
trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps); |
trinity_set_vce_clock(rdev, new_ps, old_ps); |
} |
trinity_release_mutex(rdev); |
1269,6 → 1286,7 |
trinity_release_mutex(rdev); |
} |
#if 0 |
void trinity_dpm_reset_asic(struct radeon_device *rdev) |
{ |
struct trinity_power_info *pi = trinity_get_pi(rdev); |
1284,6 → 1302,7 |
} |
trinity_release_mutex(rdev); |
} |
#endif |
static u16 trinity_convert_voltage_index_to_value(struct radeon_device *rdev, |
u32 vid_2bit) |
1481,8 → 1500,36 |
} |
} |
/*
 * trinity_get_vce_clock_voltage - look up the minimum voltage for a
 * requested VCE clock pair.
 *
 * @evclk/@ecclk: requested encoder/core clocks (0/0 means VCE idle)
 * @voltage: out; the 2-bit voltage index floor for these clocks
 *
 * Returns 0 when a matching (or trivially zero) entry is found.
 * Returns -EINVAL when no table entry covers the request; in that case
 * *voltage is still set to the table's highest voltage as a safe
 * fallback.
 *
 * Fix vs. original: the old code tested `table && table->count == 0`,
 * implying @table could be NULL, yet unconditionally dereferenced it in
 * the loop below.  @table is the address of a struct embedded in rdev
 * and can never be NULL, so the misleading NULL-guard is removed and an
 * empty table is handled uniformly.
 */
static int trinity_get_vce_clock_voltage(struct radeon_device *rdev,
					 u32 evclk, u32 ecclk, u16 *voltage)
{
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	/* No encode clocks requested, or nothing to look up: no floor. */
	if (((evclk == 0) && (ecclk == 0)) || (table->count == 0)) {
		*voltage = 0;
		return 0;
	}

	/* Entries are ordered by clock; take the first one that covers
	 * both requested clocks. */
	for (i = 0; i < table->count; i++) {
		if ((evclk <= table->entries[i].evclk) &&
		    (ecclk <= table->entries[i].ecclk)) {
			*voltage = table->entries[i].v;
			return 0;
		}
	}

	/* if no match return the highest voltage */
	*voltage = table->entries[table->count - 1].v;
	return -EINVAL;
}
static void trinity_apply_state_adjust_rules(struct radeon_device *rdev, |
struct radeon_ps *new_rps, |
struct radeon_ps *old_rps) |
1494,6 → 1541,7 |
u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */ |
u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */ |
u32 i; |
u16 min_vce_voltage; |
bool force_high; |
u32 num_active_displays = rdev->pm.dpm.new_active_crtc_count; |
1502,6 → 1550,14 |
trinity_adjust_uvd_state(rdev, new_rps); |
if (new_rps->vce_active) { |
new_rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; |
new_rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk; |
} else { |
new_rps->evclk = 0; |
new_rps->ecclk = 0; |
} |
for (i = 0; i < ps->num_levels; i++) { |
if (ps->levels[i].vddc_index < min_voltage) |
ps->levels[i].vddc_index = min_voltage; |
1510,6 → 1566,17 |
ps->levels[i].sclk = |
trinity_get_valid_engine_clock(rdev, min_sclk); |
/* patch in vce limits */ |
if (new_rps->vce_active) { |
/* sclk */ |
if (ps->levels[i].sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk) |
ps->levels[i].sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk; |
/* vddc */ |
trinity_get_vce_clock_voltage(rdev, new_rps->evclk, new_rps->ecclk, &min_vce_voltage); |
if (ps->levels[i].vddc_index < min_vce_voltage) |
ps->levels[i].vddc_index = min_vce_voltage; |
} |
ps->levels[i].ds_divider_index = |
sumo_get_sleep_divider_id_from_clock(rdev, ps->levels[i].sclk, sclk_in_sr); |
1731,6 → 1798,19 |
power_state_offset += 2 + power_state->v2.ucNumDPMLevels; |
} |
rdev->pm.dpm.num_ps = state_array->ucNumEntries; |
/* fill in the vce power states */ |
for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) { |
u32 sclk; |
clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx; |
clock_info = (union pplib_clock_info *) |
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; |
sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); |
sclk |= clock_info->sumo.ucEngineClockHigh << 16; |
rdev->pm.dpm.vce_states[i].sclk = sclk; |
rdev->pm.dpm.vce_states[i].mclk = 0; |
} |
return 0; |
} |
1912,6 → 1992,10 |
if (ret) |
return ret; |
ret = r600_parse_extended_power_table(rdev); |
if (ret) |
return ret; |
ret = trinity_parse_power_table(rdev); |
if (ret) |
return ret; |
1962,6 → 2046,31 |
} |
} |
u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev) |
{ |
struct trinity_power_info *pi = trinity_get_pi(rdev); |
struct radeon_ps *rps = &pi->current_rps; |
struct trinity_ps *ps = trinity_get_ps(rps); |
struct trinity_pl *pl; |
u32 current_index = |
(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_MASK) >> |
CURRENT_STATE_SHIFT; |
if (current_index >= ps->num_levels) { |
return 0; |
} else { |
pl = &ps->levels[current_index]; |
return pl->sclk; |
} |
} |
u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev) |
{ |
struct trinity_power_info *pi = trinity_get_pi(rdev); |
return pi->sys_info.bootup_uma_clk; |
} |
void trinity_dpm_fini(struct radeon_device *rdev) |
{ |
int i; |
1973,6 → 2082,7 |
} |
kfree(rdev->pm.dpm.ps); |
kfree(rdev->pm.dpm.priv); |
r600_free_extended_power_table(rdev); |
} |
u32 trinity_dpm_get_sclk(struct radeon_device *rdev, bool low) |
/drivers/video/drm/radeon/utils.c |
---|
226,62 → 226,72 |
* example output buffer: |
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO |
*/ |
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, |
int groupsize, char *linebuf, size_t linebuflen, |
bool ascii) |
int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, |
char *linebuf, size_t linebuflen, bool ascii) |
{ |
const u8 *ptr = buf; |
int ngroups; |
u8 ch; |
int j, lx = 0; |
int ascii_column; |
int ret; |
if (rowsize != 16 && rowsize != 32) |
rowsize = 16; |
if (!len) |
goto nil; |
if (len > rowsize) /* limit to one line at a time */ |
len = rowsize; |
if (!is_power_of_2(groupsize) || groupsize > 8) |
groupsize = 1; |
if ((len % groupsize) != 0) /* no mixed size output */ |
groupsize = 1; |
switch (groupsize) { |
case 8: { |
ngroups = len / groupsize; |
ascii_column = rowsize * 2 + rowsize / groupsize + 1; |
if (!linebuflen) |
goto overflow1; |
if (!len) |
goto nil; |
if (groupsize == 8) { |
const u64 *ptr8 = buf; |
int ngroups = len / groupsize; |
for (j = 0; j < ngroups; j++) |
lx += scnprintf(linebuf + lx, linebuflen - lx, |
for (j = 0; j < ngroups; j++) { |
ret = snprintf(linebuf + lx, linebuflen - lx, |
"%s%16.16llx", j ? " " : "", |
(unsigned long long)*(ptr8 + j)); |
ascii_column = 17 * ngroups + 2; |
break; |
if (ret >= linebuflen - lx) |
goto overflow1; |
lx += ret; |
} |
case 4: { |
} else if (groupsize == 4) { |
const u32 *ptr4 = buf; |
int ngroups = len / groupsize; |
for (j = 0; j < ngroups; j++) |
lx += scnprintf(linebuf + lx, linebuflen - lx, |
"%s%8.8x", j ? " " : "", *(ptr4 + j)); |
ascii_column = 9 * ngroups + 2; |
break; |
for (j = 0; j < ngroups; j++) { |
ret = snprintf(linebuf + lx, linebuflen - lx, |
"%s%8.8x", j ? " " : "", |
*(ptr4 + j)); |
if (ret >= linebuflen - lx) |
goto overflow1; |
lx += ret; |
} |
case 2: { |
} else if (groupsize == 2) { |
const u16 *ptr2 = buf; |
int ngroups = len / groupsize; |
for (j = 0; j < ngroups; j++) |
lx += scnprintf(linebuf + lx, linebuflen - lx, |
"%s%4.4x", j ? " " : "", *(ptr2 + j)); |
ascii_column = 5 * ngroups + 2; |
break; |
for (j = 0; j < ngroups; j++) { |
ret = snprintf(linebuf + lx, linebuflen - lx, |
"%s%4.4x", j ? " " : "", |
*(ptr2 + j)); |
if (ret >= linebuflen - lx) |
goto overflow1; |
lx += ret; |
} |
default: |
for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { |
} else { |
for (j = 0; j < len; j++) { |
if (linebuflen < lx + 3) |
goto overflow2; |
ch = ptr[j]; |
linebuf[lx++] = hex_asc_hi(ch); |
linebuf[lx++] = hex_asc_lo(ch); |
289,23 → 299,29 |
} |
if (j) |
lx--; |
ascii_column = 3 * rowsize + 2; |
break; |
} |
if (!ascii) |
goto nil; |
while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) |
while (lx < ascii_column) { |
if (linebuflen < lx + 2) |
goto overflow2; |
linebuf[lx++] = ' '; |
for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) { |
} |
for (j = 0; j < len; j++) { |
if (linebuflen < lx + 2) |
goto overflow2; |
ch = ptr[j]; |
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; |
} |
nil: |
linebuf[lx] = '\0'; |
return lx; |
overflow2: |
linebuf[lx++] = '\0'; |
overflow1: |
return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1; |
} |
/** |
* print_hex_dump - print a text hex dump to syslog for a binary blob of data |
* @level: kernel log level (e.g. KERN_DEBUG) |
377,6 → 393,98 |
buf, len, true); |
} |
#define KMAP_MAX 256 |
static struct mutex kmap_mutex; |
static struct page* kmap_table[KMAP_MAX]; |
static int kmap_av; |
static int kmap_first; |
static void* kmap_base; |
int kmap_init() |
{ |
kmap_base = AllocKernelSpace(KMAP_MAX*4096); |
if(kmap_base == NULL) |
return -1; |
kmap_av = KMAP_MAX; |
MutexInit(&kmap_mutex); |
return 0; |
}; |
/*
 * kmap - map a page into the reserved kmap window.
 *
 * Scans the slot table for a free entry (starting at the kmap_first
 * hint), records @page there, maps it at kmap_base + slot*4096 and
 * returns that virtual address.
 *
 * NOTE(review): if every slot is taken, the do/while below spins —
 * re-taking the mutex each pass with no yield or back-off — until
 * another thread frees a slot via kunmap()/kunmap_atomic().  Confirm
 * this cannot livelock under the scheduler in use.
 */
void *kmap(struct page *page)
{
void *vaddr = NULL;
int i;
do
{
MutexLock(&kmap_mutex);
if(kmap_av != 0)
{
/* kmap_first is a search hint: no free slot exists below it */
for(i = kmap_first; i < KMAP_MAX; i++)
{
if(kmap_table[i] == NULL)
{
kmap_av--;
kmap_first = i;
kmap_table[i] = page;
vaddr = kmap_base + (i<<12);
MapPage(vaddr,(addr_t)page,3);
break;
};
};
};
MutexUnlock(&kmap_mutex);
}while(vaddr == NULL);
return vaddr;
};
void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap"))); |
/*
 * kunmap - release a mapping created by kmap().
 *
 * Finds the slot holding @page, unmaps its virtual address, clears the
 * table entry and updates the free-slot hint.  Silently does nothing
 * if @page is not currently mapped.
 */
void kunmap(struct page *page)
{
void *vaddr;
int i;
MutexLock(&kmap_mutex);
for(i = 0; i < KMAP_MAX; i++)
{
if(kmap_table[i] == page)
{
kmap_av++;
/* keep kmap_first pointing at the lowest possibly-free slot */
if(i < kmap_first)
kmap_first = i;
kmap_table[i] = NULL;
vaddr = kmap_base + (i<<12);
MapPage(vaddr,0,0);
break;
};
};
MutexUnlock(&kmap_mutex);
};
/*
 * kunmap_atomic - release a mapping by its virtual address.
 *
 * Derives the slot index from @vaddr's offset into the kmap window,
 * then frees the slot.  No validation is done that @vaddr actually
 * lies inside [kmap_base, kmap_base + KMAP_MAX*4096).
 *
 * NOTE(review): the page is unmapped *before* the mutex is taken and
 * the table entry cleared — a concurrent kmap() cannot hand out this
 * slot in that window (the entry is still non-NULL), so this looks
 * safe, but confirm no path relies on map/table consistency under the
 * lock.
 */
void kunmap_atomic(void *vaddr)
{
int i;
MapPage(vaddr,0,0);
i = (vaddr - kmap_base) >> 12;
MutexLock(&kmap_mutex);
kmap_av++;
if(i < kmap_first)
kmap_first = i;
kmap_table[i] = NULL;
MutexUnlock(&kmap_mutex);
}
void msleep(unsigned int msecs) |
{ |
msecs /= 10; |
974,4 → 1082,40 |
__call_rcu(head, func, &rcu_sched_ctrlblk); |
} |
/*
 * fb_get_options - stub for the kernel's fbdev command-line parser.
 *
 * This port has no kernel command line, so no option string is ever
 * found.  Returns 1 ("not found"); *@option is left untouched.
 *
 * Fix vs. original: the function was declared with no return type
 * (implicit int), which is invalid C since C99 — declare it int
 * explicitly.
 */
int fb_get_options(const char *name, char **option)
{
	return 1;
}
ktime_t ktime_get(void) |
{ |
ktime_t t; |
t.tv64 = GetClockNs(); |
return t; |
} |
/* Stub: hardware-cursor state reset is not implemented in this port. */
void radeon_cursor_reset(struct drm_crtc *crtc)
{
}
/*
 * gcd - greatest common divisor of two unsigned longs.
 *
 * Classic Euclidean algorithm.  Works for arguments in either order
 * and returns the non-zero argument when the other is 0
 * (gcd(0, 0) == 0), exactly matching the previous implementation.
 *
 * Fix vs. original: drops the dependency on the nonstandard kernel
 * swap() macro — the `if (a < b) swap(a, b)` pre-step and the
 * `if (!b) return a` special case are both redundant, since the loop
 * handles a < b (first iteration effectively swaps) and b == 0
 * (loop never runs, returns a).
 */
unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b != 0) {
		unsigned long r = a % b;

		a = b;
		b = r;
	}
	return a;
}
/drivers/video/drm/radeon/uvd_v1_0.c |
---|
22,6 → 22,7 |
* Authors: Christian König <christian.koenig@amd.com> |
*/ |
#include <linux/firmware.h> |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
465,18 → 466,8 |
struct radeon_semaphore *semaphore, |
bool emit_wait) |
{ |
uint64_t addr = semaphore->gpu_addr; |
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); |
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); |
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); |
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); |
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); |
radeon_ring_write(ring, emit_wait ? 1 : 0); |
return true; |
/* disable semaphores for UVD V1 hardware */ |
return false; |
} |
/** |
/drivers/video/drm/radeon/uvd_v2_2.c |
---|
60,6 → 60,35 |
} |
/** |
* uvd_v2_2_semaphore_emit - emit semaphore command |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring pointer |
* @semaphore: semaphore to emit commands for |
* @emit_wait: true if we should emit a wait command |
* |
* Emit a semaphore command (either wait or signal) to the UVD ring. |
*/ |
bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
/* the semaphore address is programmed in 8-byte units, split into
 * two 20-bit register halves (bits 3..22 and 23..42) */
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
/* command: 1 = wait on the semaphore, 0 = signal it */
radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
radeon_ring_write(ring, emit_wait ? 1 : 0);
/* semaphores are supported on this UVD revision */
return true;
}
/** |
* uvd_v2_2_resume - memory controller programming |
* |
* @rdev: radeon_device pointer |
/drivers/video/drm/radeon/vce_v1_0.c |
---|
31,6 → 31,23 |
#include "radeon_asic.h" |
#include "sid.h" |
#define VCE_V1_0_FW_SIZE (256 * 1024) |
#define VCE_V1_0_STACK_SIZE (64 * 1024) |
#define VCE_V1_0_DATA_SIZE (7808 * (RADEON_MAX_VCE_HANDLES + 1)) |
/*
 * Signature header found at the start of the VCE 1.0 firmware image,
 * as consumed by vce_v1_0_load_fw():
 *  - len: length of the firmware payload following this header
 *  - num: number of valid entries in val[]
 *  - val[]: per-chip signing data; the entry whose chip_id matches the
 *    running ASIC supplies the keyselect, nonce and signature words.
 * NOTE(review): 'off' is not referenced in the code visible here —
 * presumably a payload offset; confirm against the firmware layout.
 */
struct vce_v1_0_fw_signature
{
int32_t off;
uint32_t len;
int32_t num;
struct {
uint32_t chip_id;
uint32_t keyselect;
uint32_t nonce[4];
uint32_t sigval[4];
} val[8];
};
/** |
* vce_v1_0_get_rptr - get read pointer |
* |
82,6 → 99,186 |
WREG32(VCE_RB_WPTR2, ring->wptr); |
} |
/*
 * vce_v1_0_enable_mgcg - toggle VCE medium-grain clock gating.
 *
 * Gating is only actually enabled when the ASIC advertises
 * RADEON_CG_SUPPORT_VCE_MGCG; otherwise (and on disable) the ungated
 * configuration is programmed.  The mask values select encoder
 * clock-gating control bits — NOTE(review): individual bit meanings
 * are not documented here; taken as-is from the register programming
 * sequence.
 */
void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable)
{
u32 tmp;
if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) {
/* dynamic clock mode + gated UENC clocks */
tmp = RREG32(VCE_CLOCK_GATING_A);
tmp |= CGC_DYN_CLOCK_MODE;
WREG32(VCE_CLOCK_GATING_A, tmp);
tmp = RREG32(VCE_UENC_CLOCK_GATING);
tmp &= ~0x1ff000;
tmp |= 0xff800000;
WREG32(VCE_UENC_CLOCK_GATING, tmp);
tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
tmp &= ~0x3ff;
WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
} else {
/* static clocks: inverse of the masks above */
tmp = RREG32(VCE_CLOCK_GATING_A);
tmp &= ~CGC_DYN_CLOCK_MODE;
WREG32(VCE_CLOCK_GATING_A, tmp);
tmp = RREG32(VCE_UENC_CLOCK_GATING);
tmp |= 0x1ff000;
tmp &= ~0xff800000;
WREG32(VCE_UENC_CLOCK_GATING, tmp);
tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
tmp |= 0x3ff;
WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
}
}
/*
 * vce_v1_0_init_cg - program the default VCE clock-gating state.
 *
 * Called at the end of vce_v1_0_resume() once firmware load completed.
 * Enables dynamic clock mode and clears the UENC gating-control bits.
 * NOTE(review): mask bit meanings undocumented; sequence taken as-is.
 */
static void vce_v1_0_init_cg(struct radeon_device *rdev)
{
u32 tmp;
tmp = RREG32(VCE_CLOCK_GATING_A);
tmp |= CGC_DYN_CLOCK_MODE;
WREG32(VCE_CLOCK_GATING_A, tmp);
tmp = RREG32(VCE_CLOCK_GATING_B);
tmp |= 0x1e;
tmp &= ~0xe100e1;
WREG32(VCE_CLOCK_GATING_B, tmp);
tmp = RREG32(VCE_UENC_CLOCK_GATING);
tmp &= ~0xff9ff000;
WREG32(VCE_UENC_CLOCK_GATING, tmp);
tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
tmp &= ~0x3ff;
WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
}
/*
 * vce_v1_0_load_fw - stage the VCE 1.0 firmware image for upload.
 *
 * @data: destination buffer (the firmware area of the VCE BO)
 *
 * Selects the per-chip signing entry matching the running ASIC, writes
 * the nonce/length header at byte offset 192 (the firmware proper
 * starts at offset 256 — see vce_v1_0_resume()), copies the payload,
 * appends the signature words and records the key select for the
 * resume sequence.
 *
 * Returns -EINVAL for unsupported ASICs, a malformed signature header
 * or a missing chip entry; 0 on success.
 *
 * Fix vs. original: sign->num comes straight from the firmware blob
 * and was used unbounded as a loop limit over the fixed val[8] array —
 * a corrupt or truncated image could make the loop read past the end
 * of the header.  Validate it before use.
 */
int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
{
	struct vce_v1_0_fw_signature *sign = (void *)rdev->vce_fw->data;
	uint32_t chip_id;
	int i;

	switch (rdev->family) {
	case CHIP_TAHITI:
		chip_id = 0x01000014;
		break;
	case CHIP_VERDE:
		chip_id = 0x01000015;
		break;
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
		chip_id = 0x01000016;
		break;
	case CHIP_ARUBA:
		chip_id = 0x01000017;
		break;
	default:
		return -EINVAL;
	}

	/* entry count is untrusted firmware data: clamp to the array */
	if (sign->num < 0 ||
	    sign->num > (int)(sizeof(sign->val) / sizeof(sign->val[0])))
		return -EINVAL;

	for (i = 0; i < sign->num; ++i) {
		if (sign->val[i].chip_id == chip_id)
			break;
	}
	if (i == sign->num)
		return -EINVAL;

	/* header lives in the 64 bytes before the firmware start (256) */
	data += (256 - 64) / 4;
	data[0] = sign->val[i].nonce[0];
	data[1] = sign->val[i].nonce[1];
	data[2] = sign->val[i].nonce[2];
	data[3] = sign->val[i].nonce[3];
	data[4] = sign->len + 64;
	memset(&data[5], 0, 44);

	/* payload follows the signature header in the blob */
	memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));

	data += data[4] / 4;
	data[0] = sign->val[i].sigval[0];
	data[1] = sign->val[i].sigval[1];
	data[2] = sign->val[i].sigval[2];
	data[3] = sign->val[i].sigval[3];

	rdev->vce.keyselect = sign->val[i].keyselect;

	return 0;
}
unsigned vce_v1_0_bo_size(struct radeon_device *rdev) |
{ |
WARN_ON(VCE_V1_0_FW_SIZE < rdev->vce_fw->size); |
return VCE_V1_0_FW_SIZE + VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE; |
} |
/*
 * vce_v1_0_resume - program the VCE memory interface and boot firmware.
 *
 * Disables clock gating, configures the LMI, carves the VCE BO into
 * firmware/stack/data windows via the VCPU cache registers, then kicks
 * the firmware with the key select recorded by vce_v1_0_load_fw() and
 * polls VCE_FW_REG_STATUS (up to ~100ms per phase) for DONE, PASS and
 * not-BUSY.
 *
 * Returns 0 on success, -ETIMEDOUT if a status poll expires, -EINVAL
 * if the firmware signature check fails.
 */
int vce_v1_0_resume(struct radeon_device *rdev)
{
uint64_t addr = rdev->vce.gpu_addr;
uint32_t size;
int i;
/* ungate VCE clocks for the boot sequence */
WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16));
WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
WREG32(VCE_CLOCK_GATING_B, 0);
WREG32_P(VCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4);
/* local memory interface setup; no swapping, no VM */
WREG32(VCE_LMI_CTRL, 0x00398000);
WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1);
WREG32(VCE_LMI_SWAP_CNTL, 0);
WREG32(VCE_LMI_SWAP_CNTL1, 0);
WREG32(VCE_LMI_VM_CTRL, 0);
WREG32(VCE_VCPU_SCRATCH7, RADEON_MAX_VCE_HANDLES);
/* firmware window; first 256 bytes hold the signature header
 * written by vce_v1_0_load_fw() */
addr += 256;
size = VCE_V1_0_FW_SIZE;
WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
WREG32(VCE_VCPU_CACHE_SIZE0, size);
/* stack window */
addr += size;
size = VCE_V1_0_STACK_SIZE;
WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
WREG32(VCE_VCPU_CACHE_SIZE1, size);
/* data window */
addr += size;
size = VCE_V1_0_DATA_SIZE;
WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
WREG32(VCE_VCPU_CACHE_SIZE2, size);
WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100);
/* hand the key select to the firmware loader and wait for it */
WREG32(VCE_LMI_FW_START_KEYSEL, rdev->vce.keyselect);
for (i = 0; i < 10; ++i) {
mdelay(10);
if (RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_DONE)
break;
}
if (i == 10)
return -ETIMEDOUT;
if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_PASS))
return -EINVAL;
for (i = 0; i < 10; ++i) {
mdelay(10);
if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_BUSY))
break;
}
if (i == 10)
return -ETIMEDOUT;
vce_v1_0_init_cg(rdev);
return 0;
}
/** |
* vce_v1_0_start - start VCE block |
* |
/drivers/video/drm/radeon/vce_v2_0.c |
---|
31,6 → 31,10 |
#include "radeon_asic.h" |
#include "cikd.h" |
#define VCE_V2_0_FW_SIZE (256 * 1024) |
#define VCE_V2_0_STACK_SIZE (64 * 1024) |
#define VCE_V2_0_DATA_SIZE (23552 * RADEON_MAX_VCE_HANDLES) |
static void vce_v2_0_set_sw_cg(struct radeon_device *rdev, bool gated) |
{ |
u32 tmp; |
140,6 → 144,12 |
WREG32(VCE_CLOCK_GATING_B, tmp); |
} |
unsigned vce_v2_0_bo_size(struct radeon_device *rdev) |
{ |
WARN_ON(rdev->vce_fw->size > VCE_V2_0_FW_SIZE); |
return VCE_V2_0_FW_SIZE + VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE; |
} |
int vce_v2_0_resume(struct radeon_device *rdev) |
{ |
uint64_t addr = rdev->vce.gpu_addr; |
156,17 → 166,20 |
WREG32(VCE_LMI_SWAP_CNTL1, 0); |
WREG32(VCE_LMI_VM_CTRL, 0); |
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size); |
WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8); |
addr &= 0xff; |
size = VCE_V2_0_FW_SIZE; |
WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); |
WREG32(VCE_VCPU_CACHE_SIZE0, size); |
addr += size; |
size = RADEON_VCE_STACK_SIZE; |
size = VCE_V2_0_STACK_SIZE; |
WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff); |
WREG32(VCE_VCPU_CACHE_SIZE1, size); |
addr += size; |
size = RADEON_VCE_HEAP_SIZE; |
size = VCE_V2_0_DATA_SIZE; |
WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff); |
WREG32(VCE_VCPU_CACHE_SIZE2, size); |