33,8 → 33,10 |
#include "avivod.h" |
#include "evergreen_reg.h" |
#include "evergreen_blit_shaders.h" |
#include "radeon_ucode.h" |
|
#define EVERGREEN_PFP_UCODE_SIZE 1120 |
#define EVERGREEN_PM4_UCODE_SIZE 1376 |
|
static const u32 crtc_offsets[6] = |
{ |
EVERGREEN_CRTC0_REGISTER_OFFSET, |
45,109 → 47,12 |
EVERGREEN_CRTC5_REGISTER_OFFSET |
}; |
|
#include "clearstate_evergreen.h" |
|
/*
 * Register offsets making up the RLC save/restore list — the name suggests
 * this is consumed by the RLC firmware on Sumo-class parts to snapshot and
 * restore GPU state (NOTE(review): exact consumer not visible here; confirm
 * against the RLC init code that references this table).
 * The ordering is defined by the hardware/firmware consumer — do not reorder.
 */
static const u32 sumo_rlc_save_restore_register_list[] =
{
	0x98fc,
	0x9830,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c08,
	0x8c0c,
	0x8d8c,
	0x8c20,
	0x8c24,
	0x8c28,
	0x8c18,
	0x8c1c,
	0x8cf0,
	0x8e2c,
	0x8e38,
	0x8c30,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x88d4,
	0xa008,
	0x900c,
	0x9100,
	0x913c,
	0x98f8,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x9148,
	0x914c,
	0x3f90,
	0x3f94,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x9150,
	0x802c,
};
|
static void evergreen_gpu_init(struct radeon_device *rdev); |
void evergreen_fini(struct radeon_device *rdev); |
void evergreen_pcie_gen2_enable(struct radeon_device *rdev); |
void evergreen_program_aspm(struct radeon_device *rdev); |
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev, |
int ring, u32 cp_int_cntl); |
extern void cayman_vm_decode_fault(struct radeon_device *rdev, |
u32 status, u32 addr); |
void cik_init_cp_pg_table(struct radeon_device *rdev); |
|
extern u32 si_get_csb_size(struct radeon_device *rdev); |
extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); |
extern u32 cik_get_csb_size(struct radeon_device *rdev); |
extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer); |
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev); |
|
static const u32 evergreen_golden_registers[] = |
{ |
0x3f90, 0xffff0000, 0xff000000, |
189,7 → 94,7 |
0x8c1c, 0xffffffff, 0x00001010, |
0x28350, 0xffffffff, 0x00000000, |
0xa008, 0xffffffff, 0x00010000, |
0x5c4, 0xffffffff, 0x00000001, |
0x5cc, 0xffffffff, 0x00000001, |
0x9508, 0xffffffff, 0x00000002, |
0x913c, 0x0000000f, 0x0000000a |
}; |
476,7 → 381,7 |
0x8c1c, 0xffffffff, 0x00001010, |
0x28350, 0xffffffff, 0x00000000, |
0xa008, 0xffffffff, 0x00010000, |
0x5c4, 0xffffffff, 0x00000001, |
0x5cc, 0xffffffff, 0x00000001, |
0x9508, 0xffffffff, 0x00000002 |
}; |
|
635,7 → 540,7 |
static const u32 supersumo_golden_registers[] = |
{ |
0x5eb4, 0xffffffff, 0x00000002, |
0x5c4, 0xffffffff, 0x00000001, |
0x5cc, 0xffffffff, 0x00000001, |
0x7030, 0xffffffff, 0x00000011, |
0x7c30, 0xffffffff, 0x00000011, |
0x6104, 0x01000300, 0x00000000, |
719,7 → 624,7 |
static const u32 wrestler_golden_registers[] = |
{ |
0x5eb4, 0xffffffff, 0x00000002, |
0x5c4, 0xffffffff, 0x00000001, |
0x5cc, 0xffffffff, 0x00000001, |
0x7030, 0xffffffff, 0x00000011, |
0x7c30, 0xffffffff, 0x00000011, |
0x6104, 0x01000300, 0x00000000, |
1175,74 → 1080,25 |
|
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) |
{ |
int readrq; |
u16 v; |
u16 ctl, v; |
int err; |
|
readrq = pcie_get_readrq(rdev->pdev); |
v = ffs(readrq) - 8; |
err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl); |
if (err) |
return; |
|
v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12; |
|
/* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it |
* to avoid hangs or perfomance issues |
*/ |
if ((v == 0) || (v == 6) || (v == 7)) |
pcie_set_readrq(rdev->pdev, 512); |
if ((v == 0) || (v == 6) || (v == 7)) { |
ctl &= ~PCI_EXP_DEVCTL_READRQ; |
ctl |= (2 << 12); |
pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl); |
} |
|
void dce4_program_fmt(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
int bpc = 0; |
u32 tmp = 0; |
enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE; |
|
if (connector) { |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
bpc = radeon_get_monitor_bpc(connector); |
dither = radeon_connector->dither; |
} |
|
/* LVDS/eDP FMT is set up by atom */ |
if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) |
return; |
|
/* not needed for analog */ |
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || |
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) |
return; |
|
if (bpc == 0) |
return; |
|
switch (bpc) { |
case 6: |
if (dither == RADEON_FMT_DITHER_ENABLE) |
/* XXX sort out optimal dither settings */ |
tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE | |
FMT_SPATIAL_DITHER_EN); |
else |
tmp |= FMT_TRUNCATE_EN; |
break; |
case 8: |
if (dither == RADEON_FMT_DITHER_ENABLE) |
/* XXX sort out optimal dither settings */ |
tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE | |
FMT_RGB_RANDOM_ENABLE | |
FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH); |
else |
tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH); |
break; |
case 10: |
default: |
/* not needed */ |
break; |
} |
|
WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp); |
} |
|
static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc) |
{ |
if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) |
1300,6 → 1156,7 |
} |
} |
|
|
/** |
* evergreen_page_flip - pageflip callback. |
* |
1313,7 → 1170,7 |
* double buffered update to take place. |
* Returns the current update pending status. |
*/ |
void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) |
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) |
{ |
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); |
1345,23 → 1202,9 |
/* Unlock the lock, so double-buffering can take place inside vblank */ |
tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; |
WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); |
} |
|
/** |
* evergreen_page_flip_pending - check if page flip is still pending |
* |
* @rdev: radeon_device pointer |
* @crtc_id: crtc to check |
* |
* Returns the current update pending status. |
*/ |
bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id) |
{ |
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; |
|
/* Return current update_pending status: */ |
return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & |
EVERGREEN_GRPH_SURFACE_UPDATE_PENDING); |
return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING; |
} |
|
/* get temperature in millidegrees */ |
1545,8 → 1388,8 |
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; |
|
if (voltage->type == VOLTAGE_SW) { |
/* 0xff0x are flags rather then an actual voltage */ |
if ((voltage->voltage & 0xff00) == 0xff00) |
/* 0xff01 is a flag rather then an actual voltage */ |
if (voltage->voltage == 0xff01) |
return; |
if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { |
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); |
1566,8 → 1409,8 |
voltage = &rdev->pm.power_state[req_ps_idx]. |
clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage; |
|
/* 0xff0x are flags rather then an actual voltage */ |
if ((voltage->vddci & 0xff00) == 0xff00) |
/* 0xff01 is a flag rather then an actual voltage */ |
if (voltage->vddci == 0xff01) |
return; |
if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { |
radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); |
1846,8 → 1689,7 |
struct drm_display_mode *mode, |
struct drm_display_mode *other_mode) |
{ |
u32 tmp, buffer_alloc, i; |
u32 pipe_offset = radeon_crtc->crtc_id * 0x20; |
u32 tmp; |
/* |
* Line Buffer Setup |
* There are 3 line buffers, each one shared by 2 display controllers. |
1870,17 → 1712,12 |
* non-linked crtcs for maximum line buffer allocation. |
*/ |
if (radeon_crtc->base.enabled && mode) { |
if (other_mode) { |
if (other_mode) |
tmp = 0; /* 1/2 */ |
buffer_alloc = 1; |
} else { |
else |
tmp = 2; /* whole */ |
buffer_alloc = 2; |
} |
} else { |
} else |
tmp = 0; |
buffer_alloc = 0; |
} |
|
/* second controller of the pair uses second half of the lb */ |
if (radeon_crtc->crtc_id % 2) |
1887,17 → 1724,6 |
tmp += 4; |
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); |
|
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { |
WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, |
DMIF_BUFFERS_ALLOCATED(buffer_alloc)); |
for (i = 0; i < rdev->usec_timeout; i++) { |
if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & |
DMIF_BUFFERS_ALLOCATED_COMPLETED) |
break; |
udelay(1); |
} |
} |
|
if (radeon_crtc->base.enabled && mode) { |
switch (tmp) { |
case 0: |
2181,8 → 2007,7 |
u32 lb_size, u32 num_heads) |
{ |
struct drm_display_mode *mode = &radeon_crtc->base.mode; |
struct evergreen_wm_params wm_low, wm_high; |
u32 dram_channels; |
struct evergreen_wm_params wm; |
u32 pixel_period; |
u32 line_time = 0; |
u32 latency_watermark_a = 0, latency_watermark_b = 0; |
2198,81 → 2023,39 |
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); |
priority_a_cnt = 0; |
priority_b_cnt = 0; |
dram_channels = evergreen_get_number_of_dram_channels(rdev); |
|
/* watermark for high clocks */ |
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { |
wm_high.yclk = |
radeon_dpm_get_mclk(rdev, false) * 10; |
wm_high.sclk = |
radeon_dpm_get_sclk(rdev, false) * 10; |
} else { |
wm_high.yclk = rdev->pm.current_mclk * 10; |
wm_high.sclk = rdev->pm.current_sclk * 10; |
} |
|
wm_high.disp_clk = mode->clock; |
wm_high.src_width = mode->crtc_hdisplay; |
wm_high.active_time = mode->crtc_hdisplay * pixel_period; |
wm_high.blank_time = line_time - wm_high.active_time; |
wm_high.interlaced = false; |
wm.yclk = rdev->pm.current_mclk * 10; |
wm.sclk = rdev->pm.current_sclk * 10; |
wm.disp_clk = mode->clock; |
wm.src_width = mode->crtc_hdisplay; |
wm.active_time = mode->crtc_hdisplay * pixel_period; |
wm.blank_time = line_time - wm.active_time; |
wm.interlaced = false; |
if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
wm_high.interlaced = true; |
wm_high.vsc = radeon_crtc->vsc; |
wm_high.vtaps = 1; |
wm.interlaced = true; |
wm.vsc = radeon_crtc->vsc; |
wm.vtaps = 1; |
if (radeon_crtc->rmx_type != RMX_OFF) |
wm_high.vtaps = 2; |
wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ |
wm_high.lb_size = lb_size; |
wm_high.dram_channels = dram_channels; |
wm_high.num_heads = num_heads; |
wm.vtaps = 2; |
wm.bytes_per_pixel = 4; /* XXX: get this from fb config */ |
wm.lb_size = lb_size; |
wm.dram_channels = evergreen_get_number_of_dram_channels(rdev); |
wm.num_heads = num_heads; |
|
/* watermark for low clocks */ |
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { |
wm_low.yclk = |
radeon_dpm_get_mclk(rdev, true) * 10; |
wm_low.sclk = |
radeon_dpm_get_sclk(rdev, true) * 10; |
} else { |
wm_low.yclk = rdev->pm.current_mclk * 10; |
wm_low.sclk = rdev->pm.current_sclk * 10; |
} |
|
wm_low.disp_clk = mode->clock; |
wm_low.src_width = mode->crtc_hdisplay; |
wm_low.active_time = mode->crtc_hdisplay * pixel_period; |
wm_low.blank_time = line_time - wm_low.active_time; |
wm_low.interlaced = false; |
if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
wm_low.interlaced = true; |
wm_low.vsc = radeon_crtc->vsc; |
wm_low.vtaps = 1; |
if (radeon_crtc->rmx_type != RMX_OFF) |
wm_low.vtaps = 2; |
wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ |
wm_low.lb_size = lb_size; |
wm_low.dram_channels = dram_channels; |
wm_low.num_heads = num_heads; |
|
/* set for high clocks */ |
latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535); |
latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535); |
/* set for low clocks */ |
latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535); |
/* wm.yclk = low clk; wm.sclk = low clk */ |
latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535); |
|
/* possibly force display priority to high */ |
/* should really do this at mode validation time... */ |
if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || |
!evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) || |
!evergreen_check_latency_hiding(&wm_high) || |
if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) || |
!evergreen_average_bandwidth_vs_available_bandwidth(&wm) || |
!evergreen_check_latency_hiding(&wm) || |
(rdev->disp_priority == 2)) { |
DRM_DEBUG_KMS("force priority a to high\n"); |
DRM_DEBUG_KMS("force priority to high\n"); |
priority_a_cnt |= PRIORITY_ALWAYS_ON; |
} |
if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || |
!evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) || |
!evergreen_check_latency_hiding(&wm_low) || |
(rdev->disp_priority == 2)) { |
DRM_DEBUG_KMS("force priority b to high\n"); |
priority_b_cnt |= PRIORITY_ALWAYS_ON; |
} |
|
2325,10 → 2108,6 |
WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt); |
WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt); |
|
/* save values for DPM */ |
radeon_crtc->line_time = line_time; |
radeon_crtc->wm_high = latency_watermark_a; |
radeon_crtc->wm_low = latency_watermark_b; |
} |
|
/** |
2424,6 → 2203,7 |
r = radeon_gart_table_vram_pin(rdev); |
if (r) |
return r; |
radeon_gart_restore(rdev); |
/* Setup L2 cache */ |
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | |
2641,9 → 2421,8 |
for (i = 0; i < rdev->num_crtc; i++) { |
if (save->crtc_enabled[i]) { |
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); |
if ((tmp & 0x7) != 3) { |
tmp &= ~0x7; |
tmp |= 0x3; |
if ((tmp & 0x3) != 0) { |
tmp &= ~0x3; |
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); |
} |
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); |
2676,7 → 2455,7 |
if (save->crtc_enabled[i]) { |
if (ASIC_IS_DCE6(rdev)) { |
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); |
tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN; |
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; |
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); |
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); |
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); |
2869,7 → 2648,7 |
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 0); |
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_ring_unlock_commit(rdev, ring); |
|
cp_me = 0xff; |
WREG32(CP_ME_CNTL, cp_me); |
2912,7 → 2691,7 |
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ |
radeon_ring_write(ring, 0x00000010); /* */ |
|
radeon_ring_unlock_commit(rdev, ring, false); |
radeon_ring_unlock_commit(rdev, ring); |
|
return 0; |
} |
2937,8 → 2716,8 |
RREG32(GRBM_SOFT_RESET); |
|
/* Set ring buffer size */ |
rb_bufsz = order_base_2(ring->ring_size / 8); |
tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
rb_bufsz = drm_order(ring->ring_size / 8); |
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
#ifdef __BIG_ENDIAN |
tmp |= BUF_SWAP_32BIT; |
#endif |
2974,6 → 2753,8 |
WREG32(CP_RB_BASE, ring->gpu_addr >> 8); |
WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); |
|
ring->rptr = RREG32(CP_RB_RPTR); |
|
evergreen_cp_start(rdev); |
ring->ready = true; |
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); |
3163,7 → 2944,7 |
rdev->config.evergreen.sx_max_export_size = 256; |
rdev->config.evergreen.sx_max_export_pos_size = 64; |
rdev->config.evergreen.sx_max_export_smx_size = 192; |
rdev->config.evergreen.max_hw_contexts = 4; |
rdev->config.evergreen.max_hw_contexts = 8; |
rdev->config.evergreen.sq_num_cf_insts = 2; |
|
rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
3310,8 → 3091,10 |
u32 efuse_straps_4; |
u32 efuse_straps_3; |
|
efuse_straps_4 = RREG32_RCU(0x204); |
efuse_straps_3 = RREG32_RCU(0x203); |
WREG32(RCU_IND_INDEX, 0x204); |
efuse_straps_4 = RREG32(RCU_IND_DATA); |
WREG32(RCU_IND_INDEX, 0x203); |
efuse_straps_3 = RREG32(RCU_IND_DATA); |
tmp = (((efuse_straps_4 & 0xf) << 4) | |
((efuse_straps_3 & 0xf0000000) >> 28)); |
} else { |
3337,18 → 3120,6 |
disabled_rb_mask &= ~(1 << i); |
} |
|
for (i = 0; i < rdev->config.evergreen.num_ses; i++) { |
u32 simd_disable_bitmap; |
|
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); |
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); |
simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16; |
simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds; |
tmp <<= 16; |
tmp |= simd_disable_bitmap; |
} |
rdev->config.evergreen.active_simds = hweight32(~tmp); |
|
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); |
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); |
|
3679,7 → 3450,7 |
return true; |
} |
|
u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev) |
static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev) |
{ |
u32 reset_mask = 0; |
u32 tmp; |
3862,48 → 3633,6 |
evergreen_print_gpu_status_regs(rdev); |
} |
|
void evergreen_gpu_pci_config_reset(struct radeon_device *rdev) |
{ |
struct evergreen_mc_save save; |
u32 tmp, i; |
|
dev_info(rdev->dev, "GPU pci config reset\n"); |
|
/* disable dpm? */ |
|
/* Disable CP parsing/prefetching */ |
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); |
udelay(50); |
/* Disable DMA */ |
tmp = RREG32(DMA_RB_CNTL); |
tmp &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL, tmp); |
/* XXX other engines? */ |
|
/* halt the rlc */ |
r600_rlc_stop(rdev); |
|
udelay(50); |
|
/* set mclk/sclk to bypass */ |
rv770_set_clk_bypass_mode(rdev); |
/* disable BM */ |
pci_clear_master(rdev->pdev); |
/* disable mem access */ |
evergreen_mc_stop(rdev, &save); |
if (evergreen_mc_wait_for_idle(rdev)) { |
dev_warn(rdev->dev, "Wait for MC idle timed out !\n"); |
} |
/* reset */ |
radeon_pci_config_reset(rdev); |
/* wait for asic to come out of reset */ |
for (i = 0; i < rdev->usec_timeout; i++) { |
if (RREG32(CONFIG_MEMSIZE) != 0xffffffff) |
break; |
udelay(1); |
} |
} |
|
int evergreen_asic_reset(struct radeon_device *rdev) |
{ |
u32 reset_mask; |
3913,17 → 3642,10 |
if (reset_mask) |
r600_set_bios_scratch_engine_hung(rdev, true); |
|
/* try soft reset */ |
evergreen_gpu_soft_reset(rdev, reset_mask); |
|
reset_mask = evergreen_gpu_check_soft_reset(rdev); |
|
/* try pci config reset */ |
if (reset_mask && radeon_hard_reset) |
evergreen_gpu_pci_config_reset(rdev); |
|
reset_mask = evergreen_gpu_check_soft_reset(rdev); |
|
if (!reset_mask) |
r600_set_bios_scratch_engine_hung(rdev, false); |
|
3946,356 → 3668,36 |
if (!(reset_mask & (RADEON_RESET_GFX | |
RADEON_RESET_COMPUTE | |
RADEON_RESET_CP))) { |
radeon_ring_lockup_update(rdev, ring); |
radeon_ring_lockup_update(ring); |
return false; |
} |
/* force CP activities */ |
radeon_ring_force_activity(rdev, ring); |
return radeon_ring_test_lockup(rdev, ring); |
} |
|
/* |
* RLC |
/** |
* evergreen_dma_is_lockup - Check if the DMA engine is locked up |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Check if the async DMA engine is locked up. |
* Returns true if the engine appears to be locked up, false if not. |
*/ |
#define RLC_SAVE_RESTORE_LIS |