/drivers/video/drm/radeon/Makefile |
---|
44,7 → 44,7 |
pci.c \ |
$(DRM_TOPDIR)/drm_crtc.c \ |
$(DRM_TOPDIR)/drm_crtc_helper.c \ |
$(DRM_TOPDIR)/drm_dp_i2c_helper.c \ |
$(DRM_TOPDIR)/drm_dp_helper.c \ |
$(DRM_TOPDIR)/drm_edid.c \ |
$(DRM_TOPDIR)/drm_fb_helper.c \ |
$(DRM_TOPDIR)/drm_irq.c \ |
/drivers/video/drm/radeon/atombios_crtc.c |
---|
561,6 → 561,8 |
/* use frac fb div on APUs */ |
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) |
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) |
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
} else { |
radeon_crtc->pll_flags |= RADEON_PLL_LEGACY; |
/drivers/video/drm/radeon/atombios_dp.c |
---|
34,8 → 34,7 |
/* move these to drm_dp_helper.c/h */ |
#define DP_LINK_CONFIGURATION_SIZE 9 |
#define DP_LINK_STATUS_SIZE 6 |
#define DP_DPCD_SIZE 8 |
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE |
static char *voltage_names[] = { |
"0.4V", "0.6V", "0.8V", "1.2V" |
290,78 → 289,6 |
/***** general DP utility functions *****/ |
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) |
{ |
return link_status[r - DP_LANE0_1_STATUS]; |
} |
static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], |
int lane) |
{ |
int i = DP_LANE0_1_STATUS + (lane >> 1); |
int s = (lane & 1) * 4; |
u8 l = dp_link_status(link_status, i); |
return (l >> s) & 0xf; |
} |
static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], |
int lane_count) |
{ |
int lane; |
u8 lane_status; |
for (lane = 0; lane < lane_count; lane++) { |
lane_status = dp_get_lane_status(link_status, lane); |
if ((lane_status & DP_LANE_CR_DONE) == 0) |
return false; |
} |
return true; |
} |
static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], |
int lane_count) |
{ |
u8 lane_align; |
u8 lane_status; |
int lane; |
lane_align = dp_link_status(link_status, |
DP_LANE_ALIGN_STATUS_UPDATED); |
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) |
return false; |
for (lane = 0; lane < lane_count; lane++) { |
lane_status = dp_get_lane_status(link_status, lane); |
if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) |
return false; |
} |
return true; |
} |
static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], |
int lane) |
{ |
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); |
int s = ((lane & 1) ? |
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : |
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); |
u8 l = dp_link_status(link_status, i); |
return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; |
} |
static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], |
int lane) |
{ |
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); |
int s = ((lane & 1) ? |
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : |
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); |
u8 l = dp_link_status(link_status, i); |
return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; |
} |
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 |
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 |
374,8 → 301,8 |
int lane; |
for (lane = 0; lane < lane_count; lane++) { |
u8 this_v = dp_get_adjust_request_voltage(link_status, lane); |
u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane); |
u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane); |
u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); |
DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n", |
lane, |
420,37 → 347,6 |
return (link_rate * lane_num * 8) / bpp; |
} |
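The formula above gives the highest pixel clock a link configuration can carry: link_rate is the per-lane symbol rate in kHz and each 8b/10b symbol delivers 8 data bits. A minimal standalone sketch of the arithmetic (illustrative only, not driver code):

#include <stdio.h>

/* highest pixel clock (kHz) a DP link can carry, mirroring the formula above */
static int dp_max_pix_clock(int link_rate, int lane_num, int bpp)
{
	return (link_rate * lane_num * 8) / bpp;
}

int main(void)
{
	/* 2.7 GHz link, 4 lanes, 24 bpp -> 360000 kHz */
	printf("%d kHz\n", dp_max_pix_clock(270000, 4, 24));
	return 0;
}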
static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE]) |
{ |
switch (dpcd[DP_MAX_LINK_RATE]) { |
case DP_LINK_BW_1_62: |
default: |
return 162000; |
case DP_LINK_BW_2_7: |
return 270000; |
case DP_LINK_BW_5_4: |
return 540000; |
} |
} |
static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE]) |
{ |
return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; |
} |
static u8 dp_get_dp_link_rate_coded(int link_rate) |
{ |
switch (link_rate) { |
case 162000: |
default: |
return DP_LINK_BW_1_62; |
case 270000: |
return DP_LINK_BW_2_7; |
case 540000: |
return DP_LINK_BW_5_4; |
} |
} |
/***** radeon specific DP functions *****/ |
/* First get the min lane# when low rate is used according to pixel clock |
462,8 → 358,8 |
int pix_clock) |
{ |
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); |
int max_link_rate = dp_get_max_link_rate(dpcd); |
int max_lane_num = dp_get_max_lane_number(dpcd); |
int max_link_rate = drm_dp_max_link_rate(dpcd); |
int max_lane_num = drm_dp_max_lane_count(dpcd); |
int lane_num; |
int max_dp_pix_clock; |
500,7 → 396,7 |
return 540000; |
} |
return dp_get_max_link_rate(dpcd); |
return drm_dp_max_link_rate(dpcd); |
} |
static u8 radeon_dp_encoder_service(struct radeon_device *rdev, |
551,21 → 447,25 |
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) |
{ |
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; |
u8 msg[25]; |
u8 msg[DP_DPCD_SIZE]; |
int ret, i; |
ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0); |
ENTER(); |
ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, |
DP_DPCD_SIZE, 0); |
if (ret > 0) { |
memcpy(dig_connector->dpcd, msg, 8); |
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); |
DRM_DEBUG_KMS("DPCD: "); |
for (i = 0; i < 8; i++) |
for (i = 0; i < DP_DPCD_SIZE; i++) |
DRM_DEBUG_KMS("%02x ", msg[i]); |
DRM_DEBUG_KMS("\n"); |
radeon_dp_probe_oui(radeon_connector); |
LEAVE(); |
return true; |
} |
FAIL(); |
dig_connector->dpcd[0] = 0; |
return false; |
} |
664,7 → 564,7 |
if (!radeon_dp_get_link_status(radeon_connector, link_status)) |
return false; |
if (dp_channel_eq_ok(link_status, dig->dp_lane_count)) |
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) |
return false; |
return true; |
} |
677,9 → 577,8 |
int enc_id; |
int dp_clock; |
int dp_lane_count; |
int rd_interval; |
bool tp3_supported; |
u8 dpcd[8]; |
u8 dpcd[DP_RECEIVER_CAP_SIZE]; |
u8 train_set[4]; |
u8 link_status[DP_LINK_STATUS_SIZE]; |
u8 tries; |
765,7 → 664,7 |
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); |
/* set the link rate on the sink */ |
tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock); |
tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock); |
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); |
/* start training on the source */ |
821,10 → 720,7 |
dp_info->tries = 0; |
voltage = 0xff; |
while (1) { |
if (dp_info->rd_interval == 0) |
udelay(100); |
else |
mdelay(dp_info->rd_interval * 4); |
drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); |
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { |
DRM_ERROR("displayport link status failed\n"); |
831,7 → 727,7 |
break; |
} |
if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { |
if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { |
clock_recovery = true; |
break; |
} |
886,10 → 782,7 |
dp_info->tries = 0; |
channel_eq = false; |
while (1) { |
if (dp_info->rd_interval == 0) |
udelay(400); |
else |
mdelay(dp_info->rd_interval * 4); |
drm_dp_link_train_channel_eq_delay(dp_info->dpcd); |
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { |
DRM_ERROR("displayport link status failed\n"); |
896,7 → 789,7 |
break; |
} |
if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { |
if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { |
channel_eq = true; |
break; |
} |
974,7 → 867,6 |
else |
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; |
dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL); |
tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); |
if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) |
dp_info.tp3_supported = true; |
981,7 → 873,7 |
else |
dp_info.tp3_supported = false; |
memcpy(dp_info.dpcd, dig_connector->dpcd, 8); |
memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); |
dp_info.rdev = rdev; |
dp_info.encoder = encoder; |
dp_info.connector = connector; |
/drivers/video/drm/radeon/atombios_encoders.c |
---|
340,7 → 340,7 |
((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || |
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) { |
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
radeon_dp_set_link_config(connector, mode); |
radeon_dp_set_link_config(connector, adjusted_mode); |
} |
return true; |
/drivers/video/drm/radeon/bitmap.c |
---|
190,7 → 190,7 |
bitmap->page_count = size/PAGE_SIZE; |
bitmap->max_count = max_size/PAGE_SIZE; |
DRM_DEBUG("%s alloc %d pages\n", __FUNCTION__, page_count); |
// DRM_DEBUG("%s alloc %d pages\n", __FUNCTION__, page_count); |
bitmap->handle = handle; |
bitmap->uaddr = uaddr; |
/drivers/video/drm/radeon/evergreen.c |
---|
1734,7 → 1734,7 |
case CHIP_SUMO: |
rdev->config.evergreen.num_ses = 1; |
rdev->config.evergreen.max_pipes = 4; |
rdev->config.evergreen.max_tile_pipes = 2; |
rdev->config.evergreen.max_tile_pipes = 4; |
if (rdev->pdev->device == 0x9648) |
rdev->config.evergreen.max_simds = 3; |
else if ((rdev->pdev->device == 0x9647) || |
1757,7 → 1757,7 |
rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; |
gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN; |
break; |
case CHIP_SUMO2: |
rdev->config.evergreen.num_ses = 1; |
1779,7 → 1779,7 |
rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; |
gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN; |
break; |
case CHIP_BARTS: |
rdev->config.evergreen.num_ses = 2; |
1827,7 → 1827,7 |
break; |
case CHIP_CAICOS: |
rdev->config.evergreen.num_ses = 1; |
rdev->config.evergreen.max_pipes = 4; |
rdev->config.evergreen.max_pipes = 2; |
rdev->config.evergreen.max_tile_pipes = 2; |
rdev->config.evergreen.max_simds = 2; |
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; |
1947,6 → 1947,7 |
WREG32(GB_ADDR_CONFIG, gb_addr_config); |
WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
WREG32(DMA_TILING_CONFIG, gb_addr_config); |
tmp = gb_addr_config & NUM_PIPES_MASK; |
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, |
2316,8 → 2317,12 |
CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
cayman_cp_int_cntl_setup(rdev, 1, 0); |
cayman_cp_int_cntl_setup(rdev, 2, 0); |
tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE; |
WREG32(CAYMAN_DMA1_CNTL, tmp); |
} else |
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
WREG32(DMA_CNTL, tmp); |
WREG32(GRBM_INT_CNTL, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
2370,6 → 2375,7 |
u32 grbm_int_cntl = 0; |
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; |
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; |
u32 dma_cntl, dma_cntl1 = 0; |
if (!rdev->irq.installed) { |
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
2397,6 → 2403,8 |
afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; |
afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; |
dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
if (rdev->family >= CHIP_CAYMAN) { |
/* enable CP interrupts on all rings */ |
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { |
2419,6 → 2427,19 |
} |
} |
if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { |
DRM_DEBUG("r600_irq_set: sw int dma\n"); |
dma_cntl |= TRAP_ENABLE; |
} |
if (rdev->family >= CHIP_CAYMAN) { |
dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE; |
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) { |
DRM_DEBUG("r600_irq_set: sw int dma1\n"); |
dma_cntl1 |= TRAP_ENABLE; |
} |
} |
if (rdev->irq.crtc_vblank_int[0] || |
atomic_read(&rdev->irq.pflip[0])) { |
DRM_DEBUG("evergreen_irq_set: vblank 0\n"); |
2504,6 → 2525,12 |
cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2); |
} else |
WREG32(CP_INT_CNTL, cp_int_cntl); |
WREG32(DMA_CNTL, dma_cntl); |
if (rdev->family >= CHIP_CAYMAN) |
WREG32(CAYMAN_DMA1_CNTL, dma_cntl1); |
WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); |
3006,6 → 3033,16 |
break; |
} |
break; |
case 146: |
case 147: |
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); |
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); |
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); |
/* reset addr and status */ |
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); |
break; |
case 176: /* CP_INT in ring buffer */ |
case 177: /* CP_INT in IB1 */ |
case 178: /* CP_INT in IB2 */ |
3029,9 → 3066,19 |
} else |
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); |
break; |
case 224: /* DMA trap event */ |
DRM_DEBUG("IH: DMA trap\n"); |
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); |
break; |
case 233: /* GUI IDLE */ |
DRM_DEBUG("IH: GUI idle\n"); |
break; |
case 244: /* DMA trap event */ |
if (rdev->family >= CHIP_CAYMAN) { |
DRM_DEBUG("IH: DMA1 trap\n"); |
radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); |
} |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
break; |
3053,6 → 3100,143 |
return IRQ_HANDLED; |
} |
/** |
* evergreen_dma_fence_ring_emit - emit a fence on the DMA ring |
* |
* @rdev: radeon_device pointer |
* @fence: radeon fence object |
* |
* Add a DMA fence packet to the ring to write |
* the fence seq number, and a DMA trap packet to generate |
* an interrupt if needed (evergreen-SI). |
*/ |
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence) |
{ |
struct radeon_ring *ring = &rdev->ring[fence->ring]; |
u64 addr = rdev->fence_drv[fence->ring].gpu_addr; |
/* write the fence */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); |
radeon_ring_write(ring, addr & 0xfffffffc); |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); |
radeon_ring_write(ring, fence->seq); |
/* generate an interrupt */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); |
/* flush HDP */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL); |
radeon_ring_write(ring, 1); |
} |
/** |
* evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine |
* |
* @rdev: radeon_device pointer |
* @ib: IB object to schedule |
* |
* Schedule an IB in the DMA ring (evergreen). |
*/ |
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, |
struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
if (rdev->wb.enabled) { |
u32 next_rptr = ring->wptr + 4; |
while ((next_rptr & 7) != 5) |
next_rptr++; |
next_rptr += 3; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); |
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); |
radeon_ring_write(ring, next_rptr); |
} |
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. |
* Pad as necessary with NOPs. |
*/ |
while ((ring->wptr & 7) != 5) |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); |
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); |
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); |
} |
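The padding rule above is easiest to see with numbers: the indirect-buffer packet emitted afterwards is 3 dwords (header, address, size with the high address bits), so aligning the write pointer to wptr % 8 == 5 makes that packet end exactly on an 8-dword boundary. A small standalone check, illustrative only:

#include <assert.h>

int main(void)
{
	unsigned wptr;

	for (wptr = 0; wptr < 64; wptr++) {
		unsigned padded = wptr;

		/* same padding condition as the loop above */
		while ((padded & 7) != 5)
			padded++;
		/* after the 3-dword IB packet the ring offset is 8-dword aligned */
		assert(((padded + 3) & 7) == 0);
	}
	return 0;
}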
/** |
* evergreen_copy_dma - copy pages using the DMA engine |
* |
* @rdev: radeon_device pointer |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* |
* Copy GPU paging using the DMA engine (evergreen-cayman). |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int evergreen_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
{ |
struct radeon_semaphore *sem = NULL; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_dw, cur_size_in_dw; |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; |
num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff); |
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
} |
if (radeon_fence_need_sync(*fence, ring->idx)) { |
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, |
ring->idx); |
radeon_fence_note_sync(*fence, ring->idx); |
} else { |
radeon_semaphore_free(rdev, &sem, NULL); |
} |
for (i = 0; i < num_loops; i++) { |
cur_size_in_dw = size_in_dw; |
if (cur_size_in_dw > 0xFFFFF) |
cur_size_in_dw = 0xFFFFF; |
size_in_dw -= cur_size_in_dw; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); |
radeon_ring_write(ring, dst_offset & 0xfffffffc); |
radeon_ring_write(ring, src_offset & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); |
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); |
src_offset += cur_size_in_dw * 4; |
dst_offset += cur_size_in_dw * 4; |
} |
r = radeon_fence_emit(rdev, fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
return r; |
} |
radeon_ring_unlock_commit(rdev, ring); |
radeon_semaphore_free(rdev, &sem, *fence); |
return r; |
} |
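For scale, each DMA_PACKET_COPY above moves at most 0xFFFFF dwords and costs 5 ring dwords (header, dst lo, src lo, dst hi, src hi). A back-of-the-envelope sketch of the chunking, assuming 4 KiB GPU pages; not driver code:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned num_gpu_pages = 4096;			/* 16 MiB with 4 KiB pages */
	unsigned size_in_dw = num_gpu_pages * (4096 / 4);
	unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);

	/* 4194304 dwords -> 5 copy packets -> 5 * 5 + 11 = 36 ring dwords reserved */
	printf("loops=%u ring_dwords=%u\n", num_loops, num_loops * 5 + 11);
	return 0;
}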
static int evergreen_startup(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
3110,6 → 3294,18 |
if (r) |
return r; |
r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
/* Enable IRQ */ |
r = r600_irq_init(rdev); |
if (r) { |
3124,6 → 3320,14 |
0, 0xfffff, RADEON_CP_PACKET2); |
if (r) |
return r; |
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
DMA_RB_RPTR, DMA_RB_WPTR, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
if (r) |
return r; |
r = evergreen_cp_load_microcode(rdev); |
if (r) |
return r; |
3130,7 → 3334,16 |
r = evergreen_cp_resume(rdev); |
if (r) |
return r; |
r = r600_dma_resume(rdev); |
if (r) |
return r; |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
return r; |
} |
return 0; |
} |
3229,6 → 3442,9 |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
/drivers/video/drm/radeon/evergreen_blit_kms.c |
---|
633,11 → 633,6 |
rdev->r600_blit.max_dim = 16384; |
/* pin copy shader into vram if already initialized */ |
if (rdev->r600_blit.shader_obj) |
goto done; |
mutex_init(&rdev->r600_blit.mutex); |
rdev->r600_blit.state_offset = 0; |
if (rdev->family < CHIP_CAYMAN) |
668,13 → 663,28 |
obj_size += cayman_ps_size * 4; |
obj_size = ALIGN(obj_size, 256); |
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
&rdev->r600_blit.shader_obj); |
/* pin copy shader into vram if not already initialized */ |
if (!rdev->r600_blit.shader_obj) { |
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, |
RADEON_GEM_DOMAIN_VRAM, |
NULL, &rdev->r600_blit.shader_obj); |
if (r) { |
DRM_ERROR("evergreen failed to allocate shader\n"); |
return r; |
} |
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
if (unlikely(r != 0)) |
return r; |
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, |
&rdev->r600_blit.shader_gpu_addr); |
radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
if (r) { |
dev_err(rdev->dev, "(%d) pin blit object failed\n", r); |
return r; |
} |
} |
DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n", |
obj_size, |
rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); |
714,17 → 724,6 |
radeon_bo_kunmap(rdev->r600_blit.shader_obj); |
radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
done: |
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
if (unlikely(r != 0)) |
return r; |
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, |
&rdev->r600_blit.shader_gpu_addr); |
radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
if (r) { |
dev_err(rdev->dev, "(%d) pin blit object failed\n", r); |
return r; |
} |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
#endif |
/drivers/video/drm/radeon/evergreend.h |
---|
45,6 → 45,8 |
#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002 |
#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001 |
#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001 |
#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002 |
#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002 |
/* Registers */ |
355,6 → 357,54 |
# define AFMT_MPEG_INFO_UPDATE (1 << 10) |
#define AFMT_GENERIC0_7 0x7138 |
/* DCE4/5 ELD audio interface */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x5f90 /* MP3 */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x5f94 /* MPEG2 */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x5f98 /* AAC */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x5f9c /* DTS */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x5fa0 /* ATRAC */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x5fa4 /* one bit audio - leave at 0 (default) */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x5fa8 /* Dolby Digital */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x5fac /* DTS-HD */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x5fb0 /* MAT-MLP */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x5fb4 /* DTS */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x5fb8 /* WMA Pro */ |
# define MAX_CHANNELS(x) (((x) & 0x7) << 0) |
/* max channels minus one. 7 = 8 channels */ |
# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8) |
# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16) |
# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */ |
/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO |
* bit0 = 32 kHz |
* bit1 = 44.1 kHz |
* bit2 = 48 kHz |
* bit3 = 88.2 kHz |
* bit4 = 96 kHz |
* bit5 = 176.4 kHz |
* bit6 = 192 kHz |
*/ |
#define AZ_HOT_PLUG_CONTROL 0x5e78 |
# define AZ_FORCE_CODEC_WAKE (1 << 0) |
# define PIN0_JACK_DETECTION_ENABLE (1 << 4) |
# define PIN1_JACK_DETECTION_ENABLE (1 << 5) |
# define PIN2_JACK_DETECTION_ENABLE (1 << 6) |
# define PIN3_JACK_DETECTION_ENABLE (1 << 7) |
# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8) |
# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9) |
# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10) |
# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11) |
# define CODEC_HOT_PLUG_ENABLE (1 << 12) |
# define PIN0_AUDIO_ENABLED (1 << 24) |
# define PIN1_AUDIO_ENABLED (1 << 25) |
# define PIN2_AUDIO_ENABLED (1 << 26) |
# define PIN3_AUDIO_ENABLED (1 << 27) |
# define AUDIO_ENABLED (1 << 31) |
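As a hypothetical example of how the descriptor fields above combine (not taken from the driver), an 8-channel LPCM descriptor for AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 advertising 32, 44.1 and 48 kHz for both multi-channel and stereo playback could be built as:

/* field layout copied from the defines above */
#define MAX_CHANNELS(x)			(((x) & 0x7) << 0)
#define SUPPORTED_FREQUENCIES(x)	(((x) & 0xff) << 8)
#define SUPPORTED_FREQUENCIES_STEREO(x)	(((x) & 0xff) << 24)

static unsigned int lpcm_descriptor_example(void)
{
	unsigned int freqs = 0x07;	/* bit0 = 32 kHz, bit1 = 44.1 kHz, bit2 = 48 kHz */

	return MAX_CHANNELS(7) |	/* channels minus one: 7 means 8 channels */
	       SUPPORTED_FREQUENCIES(freqs) |
	       SUPPORTED_FREQUENCIES_STEREO(freqs);
}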
#define GC_USER_SHADER_PIPE_CONFIG 0x8954 |
#define INACTIVE_QD_PIPES(x) ((x) << 8) |
#define INACTIVE_QD_PIPES_MASK 0x0000FF00 |
651,6 → 701,7 |
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) |
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) |
#define VM_CONTEXT1_CNTL 0x1414 |
#define VM_CONTEXT1_CNTL2 0x1434 |
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C |
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C |
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C |
672,6 → 723,8 |
#define CACHE_UPDATE_MODE(x) ((x) << 6) |
#define VM_L2_STATUS 0x140C |
#define L2_BUSY (1 << 0) |
#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC |
#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC |
#define WAIT_UNTIL 0x8040 |
689,6 → 742,7 |
#define SOFT_RESET_ROM (1 << 14) |
#define SOFT_RESET_SEM (1 << 15) |
#define SOFT_RESET_VMC (1 << 17) |
#define SOFT_RESET_DMA (1 << 20) |
#define SOFT_RESET_TST (1 << 21) |
#define SOFT_RESET_REGBB (1 << 22) |
#define SOFT_RESET_ORB (1 << 23) |
854,6 → 908,37 |
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) |
# define DC_HPDx_EN (1 << 28) |
/* ASYNC DMA */ |
#define DMA_RB_RPTR 0xd008 |
#define DMA_RB_WPTR 0xd00c |
#define DMA_CNTL 0xd02c |
# define TRAP_ENABLE (1 << 0) |
# define SEM_INCOMPLETE_INT_ENABLE (1 << 1) |
# define SEM_WAIT_INT_ENABLE (1 << 2) |
# define DATA_SWAP_ENABLE (1 << 3) |
# define FENCE_SWAP_ENABLE (1 << 4) |
# define CTXEMPTY_INT_ENABLE (1 << 28) |
#define DMA_TILING_CONFIG 0xD0B8 |
#define CAYMAN_DMA1_CNTL 0xd82c |
/* async DMA packets */ |
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ |
(((t) & 0x1) << 23) | \ |
(((s) & 0x1) << 22) | \ |
(((n) & 0xFFFFF) << 0)) |
/* async DMA Packet types */ |
#define DMA_PACKET_WRITE 0x2 |
#define DMA_PACKET_COPY 0x3 |
#define DMA_PACKET_INDIRECT_BUFFER 0x4 |
#define DMA_PACKET_SEMAPHORE 0x5 |
#define DMA_PACKET_FENCE 0x6 |
#define DMA_PACKET_TRAP 0x7 |
#define DMA_PACKET_SRBM_WRITE 0x9 |
#define DMA_PACKET_CONSTANT_FILL 0xd |
#define DMA_PACKET_NOP 0xf |
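The DMA_PACKET() macro above packs the opcode into bits [31:28], the t and s flags into bits 23 and 22, and the count n into bits [19:0]. A standalone check of two headers emitted elsewhere in this diff:

#include <assert.h>

#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) |	\
				  (((t) & 0x1) << 23) |		\
				  (((s) & 0x1) << 22) |		\
				  (((n) & 0xFFFFF) << 0))
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_FENCE 0x6

int main(void)
{
	/* the fence header emitted by evergreen_dma_fence_ring_emit() */
	assert(DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0) == 0x60000000);
	/* a one-dword embedded-data write, as used for the next_rptr update */
	assert(DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1) == 0x20000001);
	return 0;
}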
/* PCIE link stuff */ |
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ |
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ |
951,6 → 1036,53 |
#define PACKET3_WAIT_REG_MEM 0x3C |
#define PACKET3_MEM_WRITE 0x3D |
#define PACKET3_INDIRECT_BUFFER 0x32 |
#define PACKET3_CP_DMA 0x41 |
/* 1. header |
* 2. SRC_ADDR_LO or DATA [31:0] |
* 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] | |
* SRC_ADDR_HI [7:0] |
* 4. DST_ADDR_LO [31:0] |
* 5. DST_ADDR_HI [7:0] |
* 6. COMMAND [29:22] | BYTE_COUNT [20:0] |
*/ |
# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) |
/* 0 - SRC_ADDR |
* 1 - GDS |
*/ |
# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) |
/* 0 - ME |
* 1 - PFP |
*/ |
# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29) |
/* 0 - SRC_ADDR |
* 1 - GDS |
* 2 - DATA |
*/ |
# define PACKET3_CP_DMA_CP_SYNC (1 << 31) |
/* COMMAND */ |
# define PACKET3_CP_DMA_DIS_WC (1 << 21) |
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) |
/* 0 - none |
* 1 - 8 in 16 |
* 2 - 8 in 32 |
* 3 - 8 in 64 |
*/ |
# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24) |
/* 0 - none |
* 1 - 8 in 16 |
* 2 - 8 in 32 |
* 3 - 8 in 64 |
*/ |
# define PACKET3_CP_DMA_CMD_SAS (1 << 26) |
/* 0 - memory |
* 1 - register |
*/ |
# define PACKET3_CP_DMA_CMD_DAS (1 << 27) |
/* 0 - memory |
* 1 - register |
*/ |
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28) |
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29) |
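A hypothetical sketch (not driver code) of filling the five payload dwords described in the layout comment above, for a plain address-to-address copy with CP_SYNC set so following packets wait for the copy to finish; the helper name and the uint32_t buffer are illustrative only:

#include <stdint.h>

#define PACKET3_CP_DMA_DST_SEL(x)	((x) << 20)
#define PACKET3_CP_DMA_ENGINE(x)	((x) << 27)
#define PACKET3_CP_DMA_SRC_SEL(x)	((x) << 29)
#define PACKET3_CP_DMA_CP_SYNC		(1u << 31)

/* fill dwords 2..6 of the CP_DMA layout above (the PACKET3 header is dword 1) */
static void cp_dma_payload_example(uint32_t pkt[5], uint64_t src, uint64_t dst,
				   uint32_t byte_count)
{
	pkt[0] = (uint32_t)src;				/* SRC_ADDR_LO [31:0] */
	pkt[1] = PACKET3_CP_DMA_CP_SYNC |		/* wait for the copy to finish */
		 PACKET3_CP_DMA_SRC_SEL(0) |		/* source is a memory address */
		 PACKET3_CP_DMA_DST_SEL(0) |		/* destination is a memory address */
		 PACKET3_CP_DMA_ENGINE(0) |		/* ME engine */
		 ((uint32_t)(src >> 32) & 0xff);	/* SRC_ADDR_HI [7:0] */
	pkt[2] = (uint32_t)dst;				/* DST_ADDR_LO [31:0] */
	pkt[3] = (uint32_t)(dst >> 32) & 0xff;		/* DST_ADDR_HI [7:0] */
	pkt[4] = byte_count & 0x1fffff;			/* COMMAND = 0 | BYTE_COUNT [20:0] */
}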
#define PACKET3_SURFACE_SYNC 0x43 |
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) |
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) |
1896,4 → 2028,15 |
/* cayman packet3 addition */ |
#define CAYMAN_PACKET3_DEALLOC_STATE 0x14 |
/* DMA regs common on r6xx/r7xx/evergreen/ni */ |
#define DMA_RB_CNTL 0xd000 |
# define DMA_RB_ENABLE (1 << 0) |
# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ |
# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ |
# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) |
# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ |
# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ |
#define DMA_STATUS_REG 0xd034 |
# define DMA_IDLE (1 << 0) |
#endif |
/drivers/video/drm/radeon/ni.c |
---|
611,6 → 611,8 |
WREG32(GB_ADDR_CONFIG, gb_addr_config); |
WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); |
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); |
tmp = gb_addr_config & NUM_PIPES_MASK; |
tmp = r6xx_remap_render_backend(rdev, tmp, |
784,10 → 786,20 |
/* enable context1-7 */ |
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, |
(u32)(rdev->dummy_page.addr >> 12)); |
WREG32(VM_CONTEXT1_CNTL2, 0); |
WREG32(VM_CONTEXT1_CNTL, 0); |
WREG32(VM_CONTEXT1_CNTL2, 4); |
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT | |
PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT | |
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT | |
VALID_PROTECTION_FAULT_ENABLE_INTERRUPT | |
VALID_PROTECTION_FAULT_ENABLE_DEFAULT | |
READ_PROTECTION_FAULT_ENABLE_INTERRUPT | |
READ_PROTECTION_FAULT_ENABLE_DEFAULT | |
WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | |
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); |
cayman_pcie_gart_tlb_flush(rdev); |
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
895,8 → 907,10 |
if (enable) |
WREG32(CP_ME_CNTL, 0); |
else { |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); |
WREG32(SCRATCH_UMSK, 0); |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
} |
} |
1103,15 → 1117,188 |
return 0; |
} |
static int cayman_gpu_soft_reset(struct radeon_device *rdev) |
/* |
* DMA |
* Starting with R600, the GPU has an asynchronous |
* DMA engine. The programming model is very similar |
* to the 3D engine (ring buffer, IBs, etc.), but the |
* DMA controller has its own packet format that is |
* different from the PM4 format used by the 3D engine. |
* It supports copying data, writing embedded data, |
* solid fills, and a number of other things. It also |
* has support for tiling/detiling of buffers. |
* Cayman and newer support two asynchronous DMA engines. |
*/ |
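Both Cayman DMA engines expose the same register block, the first instance at 0xd000 and the second at 0xd800 (see DMA0_REGISTER_OFFSET and DMA1_REGISTER_OFFSET in nid.h later in this diff), so the code below selects an engine simply by adding the instance offset to a shared register define. A minimal helper sketch, not part of the driver:

static u32 cayman_dma_rb_cntl_example(struct radeon_device *rdev, int instance)
{
	/* instance 0 -> registers at 0xd000, instance 1 -> registers at 0xd800 */
	u32 reg_offset = instance ? DMA1_REGISTER_OFFSET : DMA0_REGISTER_OFFSET;

	return RREG32(DMA_RB_CNTL + reg_offset);
}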
/** |
* cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine |
* |
* @rdev: radeon_device pointer |
* @ib: IB object to schedule |
* |
* Schedule an IB in the DMA ring (cayman-SI). |
*/ |
void cayman_dma_ring_ib_execute(struct radeon_device *rdev, |
struct radeon_ib *ib) |
{ |
struct evergreen_mc_save save; |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
if (rdev->wb.enabled) { |
u32 next_rptr = ring->wptr + 4; |
while ((next_rptr & 7) != 5) |
next_rptr++; |
next_rptr += 3; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); |
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); |
radeon_ring_write(ring, next_rptr); |
} |
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. |
* Pad as necessary with NOPs. |
*/ |
while ((ring->wptr & 7) != 5) |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0)); |
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); |
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); |
} |
/** |
* cayman_dma_stop - stop the async dma engines |
* |
* @rdev: radeon_device pointer |
* |
* Stop the async dma engines (cayman-SI). |
*/ |
void cayman_dma_stop(struct radeon_device *rdev) |
{ |
u32 rb_cntl; |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
/* dma0 */ |
rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); |
rb_cntl &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl); |
/* dma1 */ |
rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); |
rb_cntl &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl); |
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; |
rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; |
} |
/** |
* cayman_dma_resume - setup and start the async dma engines |
* |
* @rdev: radeon_device pointer |
* |
* Set up the DMA ring buffers and enable them. (cayman-SI). |
* Returns 0 for success, error for failure. |
*/ |
int cayman_dma_resume(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring; |
u32 rb_cntl, dma_cntl; |
u32 rb_bufsz; |
u32 reg_offset, wb_offset; |
int i, r; |
/* Reset dma */ |
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); |
RREG32(SRBM_SOFT_RESET); |
udelay(50); |
WREG32(SRBM_SOFT_RESET, 0); |
for (i = 0; i < 2; i++) { |
if (i == 0) { |
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
reg_offset = DMA0_REGISTER_OFFSET; |
wb_offset = R600_WB_DMA_RPTR_OFFSET; |
} else { |
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
reg_offset = DMA1_REGISTER_OFFSET; |
wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET; |
} |
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0); |
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0); |
/* Set ring buffer size in dwords */ |
rb_bufsz = drm_order(ring->ring_size / 4); |
rb_cntl = rb_bufsz << 1; |
#ifdef __BIG_ENDIAN |
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; |
#endif |
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl); |
/* Initialize the ring buffer's read and write pointers */ |
WREG32(DMA_RB_RPTR + reg_offset, 0); |
WREG32(DMA_RB_WPTR + reg_offset, 0); |
/* set the wb address whether it's enabled or not */ |
WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset, |
upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF); |
WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset, |
((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC)); |
if (rdev->wb.enabled) |
rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; |
WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8); |
/* enable DMA IBs */ |
WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE); |
dma_cntl = RREG32(DMA_CNTL + reg_offset); |
dma_cntl &= ~CTXEMPTY_INT_ENABLE; |
WREG32(DMA_CNTL + reg_offset, dma_cntl); |
ring->wptr = 0; |
WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2); |
ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2; |
WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE); |
ring->ready = true; |
r = radeon_ring_test(rdev, ring->idx, ring); |
if (r) { |
ring->ready = false; |
return r; |
} |
} |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
return 0; |
} |
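The ring-size field above is the log2 of the ring length in dwords: with the 64 KiB DMA rings allocated in cayman_init() later in this diff, ring_size / 4 is 16384 dwords and drm_order() yields 14. A standalone check of that arithmetic (the helper below only mirrors drm_order()'s rounding, it is not the DRM function itself):

#include <assert.h>

/* smallest order with (1 << order) >= size, same result as drm_order() here */
static unsigned int order_base_2_example(unsigned long size)
{
	unsigned int order = 0;

	while ((1UL << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long ring_size = 64 * 1024;	/* bytes */

	assert(order_base_2_example(ring_size / 4) == 14);
	return 0;
}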
/** |
* cayman_dma_fini - tear down the async dma engines |
* |
* @rdev: radeon_device pointer |
* |
* Stop the async dma engines and free the rings (cayman-SI). |
*/ |
void cayman_dma_fini(struct radeon_device *rdev) |
{ |
cayman_dma_stop(rdev); |
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); |
radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]); |
} |
static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev) |
{ |
u32 grbm_reset = 0; |
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) |
return 0; |
return; |
dev_info(rdev->dev, "GPU softreset \n"); |
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", |
RREG32(GRBM_STATUS)); |
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", |
1128,19 → 1315,7 |
RREG32(CP_BUSY_STAT)); |
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", |
RREG32(CP_STAT)); |
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n", |
RREG32(0x14F8)); |
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n", |
RREG32(0x14D8)); |
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
RREG32(0x14FC)); |
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
RREG32(0x14DC)); |
evergreen_mc_stop(rdev, &save); |
if (evergreen_mc_wait_for_idle(rdev)) { |
dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
} |
/* Disable CP parsing/prefetching */ |
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); |
1165,8 → 1340,6 |
udelay(50); |
WREG32(GRBM_SOFT_RESET, 0); |
(void)RREG32(GRBM_SOFT_RESET); |
/* Wait a little for things to settle down */ |
udelay(50); |
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", |
RREG32(GRBM_STATUS)); |
1184,6 → 1357,72 |
RREG32(CP_BUSY_STAT)); |
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", |
RREG32(CP_STAT)); |
} |
static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev) |
{ |
u32 tmp; |
if (RREG32(DMA_STATUS_REG) & DMA_IDLE) |
return; |
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", |
RREG32(DMA_STATUS_REG)); |
/* dma0 */ |
tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); |
tmp &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); |
/* dma1 */ |
tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); |
tmp &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); |
/* Reset dma */ |
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); |
RREG32(SRBM_SOFT_RESET); |
udelay(50); |
WREG32(SRBM_SOFT_RESET, 0); |
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", |
RREG32(DMA_STATUS_REG)); |
} |
static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) |
{ |
struct evergreen_mc_save save; |
if (reset_mask == 0) |
return 0; |
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); |
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n", |
RREG32(0x14F8)); |
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n", |
RREG32(0x14D8)); |
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
RREG32(0x14FC)); |
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
RREG32(0x14DC)); |
evergreen_mc_stop(rdev, &save); |
if (evergreen_mc_wait_for_idle(rdev)) { |
dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
} |
if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) |
cayman_gpu_soft_reset_gfx(rdev); |
if (reset_mask & RADEON_RESET_DMA) |
cayman_gpu_soft_reset_dma(rdev); |
/* Wait a little for things to settle down */ |
udelay(50); |
evergreen_mc_resume(rdev, &save); |
return 0; |
} |
1190,9 → 1429,37 |
int cayman_asic_reset(struct radeon_device *rdev) |
{ |
return cayman_gpu_soft_reset(rdev); |
return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX | |
RADEON_RESET_COMPUTE | |
RADEON_RESET_DMA)); |
} |
/** |
* cayman_dma_is_lockup - Check if the DMA engine is locked up |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Check if the async DMA engine is locked up (cayman-SI). |
* Returns true if the engine appears to be locked up, false if not. |
*/ |
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
{ |
u32 dma_status_reg; |
if (ring->idx == R600_RING_TYPE_DMA_INDEX) |
dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET); |
else |
dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET); |
if (dma_status_reg & DMA_IDLE) { |
radeon_ring_lockup_update(ring); |
return false; |
} |
/* force ring activities */ |
radeon_ring_force_activity(rdev, ring); |
return radeon_ring_test_lockup(rdev, ring); |
} |
static int cayman_startup(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
1256,6 → 1523,36 |
if (r) |
return r; |
r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
/* Enable IRQ */ |
r = r600_irq_init(rdev); |
if (r) { |
1270,6 → 1567,23 |
0, 0xfffff, RADEON_CP_PACKET2); |
if (r) |
return r; |
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
DMA_RB_RPTR + DMA0_REGISTER_OFFSET, |
DMA_RB_WPTR + DMA0_REGISTER_OFFSET, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
if (r) |
return r; |
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, |
DMA_RB_RPTR + DMA1_REGISTER_OFFSET, |
DMA_RB_WPTR + DMA1_REGISTER_OFFSET, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
if (r) |
return r; |
r = cayman_cp_load_microcode(rdev); |
if (r) |
return r; |
1277,6 → 1591,15 |
if (r) |
return r; |
r = cayman_dma_resume(rdev); |
if (r) |
return r; |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
return r; |
} |
return 0; |
} |
1344,6 → 1667,14 |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 1024 * 1024); |
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 64 * 1024); |
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 64 * 1024); |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
1431,9 → 1762,12 |
{ |
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; |
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); |
uint64_t value; |
unsigned ndw; |
if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { |
while (count) { |
unsigned ndw = 1 + count * 2; |
ndw = 1 + count * 2; |
if (ndw > 0x3FFF) |
ndw = 0x3FFF; |
1441,17 → 1775,40 |
radeon_ring_write(ring, pe); |
radeon_ring_write(ring, upper_32_bits(pe) & 0xff); |
for (; ndw > 1; ndw -= 2, --count, pe += 8) { |
uint64_t value = 0; |
if (flags & RADEON_VM_PAGE_SYSTEM) { |
value = radeon_vm_map_gart(rdev, addr); |
value &= 0xFFFFFFFFFFFFF000ULL; |
} else if (flags & RADEON_VM_PAGE_VALID) { |
value = addr; |
} else { |
value = 0; |
} |
addr += incr; |
value |= r600_flags; |
radeon_ring_write(ring, value); |
radeon_ring_write(ring, upper_32_bits(value)); |
} |
} |
} else { |
while (count) { |
ndw = count * 2; |
if (ndw > 0xFFFFE) |
ndw = 0xFFFFE; |
/* for non-physically contiguous pages (system) */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw)); |
radeon_ring_write(ring, pe); |
radeon_ring_write(ring, upper_32_bits(pe) & 0xff); |
for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
if (flags & RADEON_VM_PAGE_SYSTEM) { |
value = radeon_vm_map_gart(rdev, addr); |
value &= 0xFFFFFFFFFFFFF000ULL; |
} else if (flags & RADEON_VM_PAGE_VALID) { |
value = addr; |
} else { |
value = 0; |
} |
addr += incr; |
} |
value |= r600_flags; |
radeon_ring_write(ring, value); |
radeon_ring_write(ring, upper_32_bits(value)); |
1458,6 → 1815,7 |
} |
} |
} |
} |
/** |
* cayman_vm_flush - vm flush using the CP |
1489,3 → 1847,26 |
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
radeon_ring_write(ring, 0x0); |
} |
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
if (vm == NULL) |
return; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); |
radeon_ring_write(ring, vm->pd_gpu_addr >> 12); |
/* flush hdp cache */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); |
radeon_ring_write(ring, 1); |
/* bits 0-7 are the VM contexts0-7 */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); |
radeon_ring_write(ring, 1 << vm->id); |
} |
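The (0xf << 16) | (reg >> 2) pattern in the SRBM_WRITE packets above appears to combine a byte-enable mask in bits [19:16] (0xf writes all four bytes) with the destination register address expressed in dwords, which is why the register offsets are shifted right by two; treat that field layout as an assumption, since the packet format is not spelled out in this file. A one-line sketch of the pattern:

/* assumed layout: byte enables in bits [19:16], register dword address in the low bits */
static u32 dma_srbm_write_dw1_example(u32 reg)
{
	return (0xf << 16) | (reg >> 2);
}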
/drivers/video/drm/radeon/nid.h |
---|
50,6 → 50,24 |
#define VMID(x) (((x) & 0x7) << 0) |
#define SRBM_STATUS 0x0E50 |
#define SRBM_SOFT_RESET 0x0E60 |
#define SOFT_RESET_BIF (1 << 1) |
#define SOFT_RESET_CG (1 << 2) |
#define SOFT_RESET_DC (1 << 5) |
#define SOFT_RESET_DMA1 (1 << 6) |
#define SOFT_RESET_GRBM (1 << 8) |
#define SOFT_RESET_HDP (1 << 9) |
#define SOFT_RESET_IH (1 << 10) |
#define SOFT_RESET_MC (1 << 11) |
#define SOFT_RESET_RLC (1 << 13) |
#define SOFT_RESET_ROM (1 << 14) |
#define SOFT_RESET_SEM (1 << 15) |
#define SOFT_RESET_VMC (1 << 17) |
#define SOFT_RESET_DMA (1 << 20) |
#define SOFT_RESET_TST (1 << 21) |
#define SOFT_RESET_REGBB (1 << 22) |
#define SOFT_RESET_ORB (1 << 23) |
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 |
#define REQUEST_TYPE(x) (((x) & 0xf) << 0) |
#define RESPONSE_TYPE_MASK 0x000000F0 |
80,7 → 98,18 |
#define VM_CONTEXT0_CNTL 0x1410 |
#define ENABLE_CONTEXT (1 << 0) |
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) |
#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3) |
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) |
#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6) |
#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7) |
#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9) |
#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10) |
#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12) |
#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13) |
#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15) |
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) |
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) |
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) |
#define VM_CONTEXT1_CNTL 0x1414 |
#define VM_CONTEXT0_CNTL2 0x1430 |
#define VM_CONTEXT1_CNTL2 0x1434 |
588,5 → 617,61 |
#define PACKET3_SET_APPEND_CNT 0x75 |
#define PACKET3_ME_WRITE 0x7A |
/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */ |
#define DMA0_REGISTER_OFFSET 0x0 /* not a register */ |
#define DMA1_REGISTER_OFFSET 0x800 /* not a register */ |
#define DMA_RB_CNTL 0xd000 |
# define DMA_RB_ENABLE (1 << 0) |
# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ |
# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ |
# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) |
# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ |
# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ |
#define DMA_RB_BASE 0xd004 |
#define DMA_RB_RPTR 0xd008 |
#define DMA_RB_WPTR 0xd00c |
#define DMA_RB_RPTR_ADDR_HI 0xd01c |
#define DMA_RB_RPTR_ADDR_LO 0xd020 |
#define DMA_IB_CNTL 0xd024 |
# define DMA_IB_ENABLE (1 << 0) |
# define DMA_IB_SWAP_ENABLE (1 << 4) |
# define CMD_VMID_FORCE (1 << 31) |
#define DMA_IB_RPTR 0xd028 |
#define DMA_CNTL 0xd02c |
# define TRAP_ENABLE (1 << 0) |
# define SEM_INCOMPLETE_INT_ENABLE (1 << 1) |
# define SEM_WAIT_INT_ENABLE (1 << 2) |
# define DATA_SWAP_ENABLE (1 << 3) |
# define FENCE_SWAP_ENABLE (1 << 4) |
# define CTXEMPTY_INT_ENABLE (1 << 28) |
#define DMA_STATUS_REG 0xd034 |
# define DMA_IDLE (1 << 0) |
#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044 |
#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048 |
#define DMA_TILING_CONFIG 0xd0b8 |
#define DMA_MODE 0xd0bc |
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ |
(((t) & 0x1) << 23) | \ |
(((s) & 0x1) << 22) | \ |
(((n) & 0xFFFFF) << 0)) |
#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \ |
(((vmid) & 0xF) << 20) | \ |
(((n) & 0xFFFFF) << 0)) |
/* async DMA Packet types */ |
#define DMA_PACKET_WRITE 0x2 |
#define DMA_PACKET_COPY 0x3 |
#define DMA_PACKET_INDIRECT_BUFFER 0x4 |
#define DMA_PACKET_SEMAPHORE 0x5 |
#define DMA_PACKET_FENCE 0x6 |
#define DMA_PACKET_TRAP 0x7 |
#define DMA_PACKET_SRBM_WRITE 0x9 |
#define DMA_PACKET_CONSTANT_FILL 0xd |
#define DMA_PACKET_NOP 0xf |
#endif |
/drivers/video/drm/radeon/r100.c |
---|
391,7 → 391,6 |
uint32_t status, msi_rearm; |
bool queue_hotplug = false; |
status = r100_irq_ack(rdev); |
if (!status) { |
return IRQ_NONE; |
804,7 → 803,16 |
return r; |
} |
ring->ready = true; |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
if (!ring->rptr_save_reg /* not resuming from suspend */ |
&& radeon_ring_supports_scratch_reg(rdev, ring)) { |
r = radeon_scratch_get(rdev, &ring->rptr_save_reg); |
if (r) { |
DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); |
ring->rptr_save_reg = 0; |
} |
} |
return 0; |
} |
815,6 → 823,7 |
} |
/* Disable ring */ |
r100_cp_disable(rdev); |
radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg); |
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
DRM_INFO("radeon: cp finalized\n"); |
} |
822,7 → 831,7 |
void r100_cp_disable(struct radeon_device *rdev) |
{ |
/* Disable ring */ |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
WREG32(RADEON_CP_CSQ_MODE, 0); |
WREG32(RADEON_CP_CSQ_CNTL, 0); |
3708,23 → 3717,36 |
return 0; |
} |
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) |
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, |
bool always_indirect) |
{ |
if (reg < rdev->rmmio_size) |
if (reg < rdev->rmmio_size && !always_indirect) |
return readl(((void __iomem *)rdev->rmmio) + reg); |
else { |
unsigned long flags; |
uint32_t ret; |
spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
return ret; |
} |
} |
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, |
bool always_indirect) |
{ |
if (reg < rdev->rmmio_size) |
if (reg < rdev->rmmio_size && !always_indirect) |
writel(v, ((void __iomem *)rdev->rmmio) + reg); |
else { |
unsigned long flags; |
spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
} |
} |
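With the new always_indirect parameter, callers are presumably wrapped by macros in radeon.h (not part of this diff). A hedged sketch of what those wrappers might look like, where the _IDX variants force the locked MM_INDEX/MM_DATA path; names and exact definitions are assumptions:

/* sketch only: the real definitions live in radeon.h and may differ */
#define RREG32(reg)		r100_mm_rreg(rdev, (reg), false)
#define WREG32(reg, v)		r100_mm_wreg(rdev, (reg), (v), false)
#define RREG32_IDX(reg)		r100_mm_rreg(rdev, (reg), true)
#define WREG32_IDX(reg, v)	r100_mm_wreg(rdev, (reg), (v), true)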
/drivers/video/drm/radeon/r600.c |
---|
812,9 → 812,8 |
* reset, it's up to the caller to determine if the GPU needs one. We |
* might add a helper function to check that. |
*/ |
static int r600_gpu_soft_reset(struct radeon_device *rdev) |
static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev) |
{ |
struct rv515_mc_save save; |
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | |
S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | |
S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | |
834,9 → 833,8 |
u32 tmp; |
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) |
return 0; |
return; |
dev_info(rdev->dev, "GPU softreset \n"); |
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", |
RREG32(R_008010_GRBM_STATUS)); |
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", |
851,12 → 849,10 |
RREG32(CP_BUSY_STAT)); |
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", |
RREG32(CP_STAT)); |
rv515_mc_stop(rdev, &save); |
if (r600_mc_wait_for_idle(rdev)) { |
dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
} |
/* Disable CP parsing/prefetching */ |
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
/* Check if any of the rendering block is busy and reset it */ |
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || |
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { |
886,8 → 882,7 |
RREG32(R_008020_GRBM_SOFT_RESET); |
mdelay(15); |
WREG32(R_008020_GRBM_SOFT_RESET, 0); |
/* Wait a little for things to settle down */ |
mdelay(1); |
dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", |
RREG32(R_008010_GRBM_STATUS)); |
dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", |
902,6 → 897,60 |
RREG32(CP_BUSY_STAT)); |
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", |
RREG32(CP_STAT)); |
} |
static void r600_gpu_soft_reset_dma(struct radeon_device *rdev) |
{ |
u32 tmp; |
if (RREG32(DMA_STATUS_REG) & DMA_IDLE) |
return; |
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", |
RREG32(DMA_STATUS_REG)); |
/* Disable DMA */ |
tmp = RREG32(DMA_RB_CNTL); |
tmp &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL, tmp); |
/* Reset dma */ |
if (rdev->family >= CHIP_RV770) |
WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); |
else |
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); |
RREG32(SRBM_SOFT_RESET); |
udelay(50); |
WREG32(SRBM_SOFT_RESET, 0); |
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", |
RREG32(DMA_STATUS_REG)); |
} |
static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) |
{ |
struct rv515_mc_save save; |
if (reset_mask == 0) |
return 0; |
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); |
rv515_mc_stop(rdev, &save); |
if (r600_mc_wait_for_idle(rdev)) { |
dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
} |
if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) |
r600_gpu_soft_reset_gfx(rdev); |
if (reset_mask & RADEON_RESET_DMA) |
r600_gpu_soft_reset_dma(rdev); |
/* Wait a little for things to settle down */ |
mdelay(1); |
rv515_mc_resume(rdev, &save); |
return 0; |
} |
924,9 → 973,34 |
return radeon_ring_test_lockup(rdev, ring); |
} |
/** |
* r600_dma_is_lockup - Check if the DMA engine is locked up |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Check if the async DMA engine is locked up (r6xx-evergreen). |
* Returns true if the engine appears to be locked up, false if not. |
*/ |
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
{ |
u32 dma_status_reg; |
dma_status_reg = RREG32(DMA_STATUS_REG); |
if (dma_status_reg & DMA_IDLE) { |
radeon_ring_lockup_update(ring); |
return false; |
} |
/* force ring activities */ |
radeon_ring_force_activity(rdev, ring); |
return radeon_ring_test_lockup(rdev, ring); |
} |
int r600_asic_reset(struct radeon_device *rdev) |
{ |
return r600_gpu_soft_reset(rdev); |
return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX | |
RADEON_RESET_COMPUTE | |
RADEON_RESET_DMA)); |
} |
u32 r6xx_remap_render_backend(struct radeon_device *rdev, |
978,14 → 1052,8 |
int r600_count_pipe_bits(uint32_t val) |
{ |
int i, ret = 0; |
for (i = 0; i < 32; i++) { |
ret += val & 1; |
val >>= 1; |
return hweight32(val); |
} |
return ret; |
} |
static void r600_gpu_init(struct radeon_device *rdev) |
{ |
1148,6 → 1216,7 |
WREG32(GB_TILING_CONFIG, tiling_config); |
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); |
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); |
WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff); |
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); |
1422,9 → 1491,10 |
*/ |
void r600_cp_stop(struct radeon_device *rdev) |
{ |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
WREG32(SCRATCH_UMSK, 0); |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
} |
int r600_init_microcode(struct radeon_device *rdev) |
1750,7 → 1820,129 |
radeon_scratch_free(rdev, ring->rptr_save_reg); |
} |
/* |
* DMA |
* Starting with R600, the GPU has an asynchronous |
* DMA engine. The programming model is very similar |
* to the 3D engine (ring buffer, IBs, etc.), but the |
* DMA controller has its own packet format that is |
* different from the PM4 format used by the 3D engine. |
* It supports copying data, writing embedded data, |
* solid fills, and a number of other things. It also |
* has support for tiling/detiling of buffers. |
*/ |
/** |
* r600_dma_stop - stop the async dma engine |
* |
* @rdev: radeon_device pointer |
* |
* Stop the async dma engine (r6xx-evergreen). |
*/ |
void r600_dma_stop(struct radeon_device *rdev) |
{ |
u32 rb_cntl = RREG32(DMA_RB_CNTL); |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
rb_cntl &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL, rb_cntl); |
rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; |
} |
/** |
* r600_dma_resume - setup and start the async dma engine |
* |
* @rdev: radeon_device pointer |
* |
* Set up the DMA ring buffer and enable it. (r6xx-evergreen). |
* Returns 0 for success, error for failure. |
*/ |
int r600_dma_resume(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
u32 rb_cntl, dma_cntl; |
u32 rb_bufsz; |
int r; |
/* Reset dma */ |
if (rdev->family >= CHIP_RV770) |
WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); |
else |
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); |
RREG32(SRBM_SOFT_RESET); |
udelay(50); |
WREG32(SRBM_SOFT_RESET, 0); |
WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); |
WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); |
/* Set ring buffer size in dwords */ |
rb_bufsz = drm_order(ring->ring_size / 4); |
rb_cntl = rb_bufsz << 1; |
#ifdef __BIG_ENDIAN |
rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; |
#endif |
WREG32(DMA_RB_CNTL, rb_cntl); |
/* Initialize the ring buffer's read and write pointers */ |
WREG32(DMA_RB_RPTR, 0); |
WREG32(DMA_RB_WPTR, 0); |
/* set the wb address whether it's enabled or not */ |
WREG32(DMA_RB_RPTR_ADDR_HI, |
upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF); |
WREG32(DMA_RB_RPTR_ADDR_LO, |
((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC)); |
if (rdev->wb.enabled) |
rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; |
WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); |
/* enable DMA IBs */ |
WREG32(DMA_IB_CNTL, DMA_IB_ENABLE); |
dma_cntl = RREG32(DMA_CNTL); |
dma_cntl &= ~CTXEMPTY_INT_ENABLE; |
WREG32(DMA_CNTL, dma_cntl); |
if (rdev->family >= CHIP_RV770) |
WREG32(DMA_MODE, 1); |
ring->wptr = 0; |
WREG32(DMA_RB_WPTR, ring->wptr << 2); |
ring->rptr = RREG32(DMA_RB_RPTR) >> 2; |
WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); |
ring->ready = true; |
r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring); |
if (r) { |
ring->ready = false; |
return r; |
} |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
return 0; |
} |
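/* Illustrative sketch (not part of the driver) of how the ring size |
 * above ends up in DMA_RB_CNTL, assuming the 64 KiB DMA ring allocated |
 * in r600_init() below and drm_order() acting as a ceil(log2) helper. |
 */ |
#if 0 /* example only */ |
	u32 ring_size = 64 * 1024;                /* bytes, as in r600_init() */ |
	u32 rb_bufsz  = drm_order(ring_size / 4); /* 16384 dwords -> order 14 */ |
	u32 rb_cntl   = rb_bufsz << 1;            /* i.e. DMA_RB_SIZE(14) == 0x1c */ |
#endif |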
/** |
* r600_dma_fini - tear down the async dma engine |
* |
* @rdev: radeon_device pointer |
* |
* Stop the async dma engine and free the ring (r6xx-evergreen). |
*/ |
void r600_dma_fini(struct radeon_device *rdev) |
{ |
r600_dma_stop(rdev); |
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); |
} |
/* |
* GPU scratch registers helpers function. |
*/ |
1806,6 → 1998,64 |
return r; |
} |
/** |
* r600_dma_ring_test - simple async dma engine test |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Test the DMA engine by using it to write a value |
* to memory (r6xx-SI). |
* Returns 0 for success, error for failure. |
*/ |
int r600_dma_ring_test(struct radeon_device *rdev, |
struct radeon_ring *ring) |
{ |
unsigned i; |
int r; |
void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
u32 tmp; |
if (!ptr) { |
DRM_ERROR("invalid vram scratch pointer\n"); |
return -EINVAL; |
} |
tmp = 0xCAFEDEAD; |
writel(tmp, ptr); |
r = radeon_ring_lock(rdev, ring, 4); |
if (r) { |
DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); |
return r; |
} |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); |
radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); |
radeon_ring_write(ring, 0xDEADBEEF); |
radeon_ring_unlock_commit(rdev, ring); |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = readl(ptr); |
if (tmp == 0xDEADBEEF) |
break; |
DRM_UDELAY(1); |
} |
if (i < rdev->usec_timeout) { |
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); |
} else { |
DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", |
ring->idx, tmp); |
r = -EINVAL; |
} |
return r; |
} |
/* |
* CP fences/semaphores |
*/ |
void r600_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence) |
{ |
1869,6 → 2119,59 |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); |
} |
/* |
* DMA fences/semaphores |
*/ |
/** |
* r600_dma_fence_ring_emit - emit a fence on the DMA ring |
* |
* @rdev: radeon_device pointer |
* @fence: radeon fence object |
* |
* Add a DMA fence packet to the ring to write |
* the fence seq number and a DMA trap packet to generate |
* an interrupt if needed (r6xx-r7xx). |
*/ |
void r600_dma_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence) |
{ |
struct radeon_ring *ring = &rdev->ring[fence->ring]; |
u64 addr = rdev->fence_drv[fence->ring].gpu_addr; |
/* write the fence */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); |
radeon_ring_write(ring, addr & 0xfffffffc); |
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); |
radeon_ring_write(ring, lower_32_bits(fence->seq)); |
/* generate an interrupt */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); |
} |
/** |
* r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* @semaphore: radeon semaphore object |
* @emit_wait: wait or signal semaphore |
* |
* Add a DMA semaphore packet to the ring to wait on or signal |
* other rings (r6xx-SI). |
*/ |
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, |
struct radeon_ring *ring, |
struct radeon_semaphore *semaphore, |
bool emit_wait) |
{ |
u64 addr = semaphore->gpu_addr; |
u32 s = emit_wait ? 0 : 1; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); |
radeon_ring_write(ring, addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(addr) & 0xff); |
} |
int r600_copy_blit(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
1888,6 → 2191,80 |
return 0; |
} |
/** |
* r600_copy_dma - copy pages using the DMA engine |
* |
* @rdev: radeon_device pointer |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* |
* Copy GPU pages using the DMA engine (r6xx). |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int r600_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
{ |
struct radeon_semaphore *sem = NULL; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_dw, cur_size_in_dw; |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; |
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); |
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
} |
if (radeon_fence_need_sync(*fence, ring->idx)) { |
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, |
ring->idx); |
radeon_fence_note_sync(*fence, ring->idx); |
} else { |
radeon_semaphore_free(rdev, &sem, NULL); |
} |
for (i = 0; i < num_loops; i++) { |
cur_size_in_dw = size_in_dw; |
if (cur_size_in_dw > 0xFFFE) |
cur_size_in_dw = 0xFFFE; |
size_in_dw -= cur_size_in_dw; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); |
radeon_ring_write(ring, dst_offset & 0xfffffffc); |
radeon_ring_write(ring, src_offset & 0xfffffffc); |
radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) | |
(upper_32_bits(src_offset) & 0xff))); |
src_offset += cur_size_in_dw * 4; |
dst_offset += cur_size_in_dw * 4; |
} |
r = radeon_fence_emit(rdev, fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
return r; |
} |
radeon_ring_unlock_commit(rdev, ring); |
radeon_semaphore_free(rdev, &sem, *fence); |
return r; |
} |
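/* Worked example (illustration only, assuming the usual 4 KiB GPU page |
 * size): copying 256 pages is 256 * 4096 / 4 = 262144 dwords; at most |
 * 0xFFFE (65534) dwords fit in one COPY packet, so num_loops = |
 * DIV_ROUND_UP(262144, 65534) = 5 and radeon_ring_lock() above reserves |
 * 5 * 4 + 8 = 28 ring dwords. |
 */ |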
int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
uint32_t tiling_flags, uint32_t pitch, |
uint32_t offset, uint32_t obj_size) |
1903,7 → 2280,7 |
static int r600_startup(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
struct radeon_ring *ring; |
int r; |
/* enable pcie gen2 link */ |
1938,6 → 2315,18 |
if (r) |
return r; |
r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
/* Enable IRQ */ |
r = r600_irq_init(rdev); |
if (r) { |
1947,12 → 2336,20 |
} |
r600_irq_set(rdev); |
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
R600_CP_RB_RPTR, R600_CP_RB_WPTR, |
0, 0xfffff, RADEON_CP_PACKET2); |
if (r) |
return r; |
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
DMA_RB_RPTR, DMA_RB_WPTR, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
if (r) |
return r; |
r = r600_cp_load_microcode(rdev); |
if (r) |
return r; |
1960,6 → 2357,15 |
if (r) |
return r; |
r = r600_dma_resume(rdev); |
if (r) |
return r; |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
return r; |
} |
return 0; |
} |
2046,6 → 2452,9 |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
2150,6 → 2559,109 |
return r; |
} |
/** |
* r600_dma_ib_test - test an IB on the DMA engine |
* |
* @rdev: radeon_device pointer |
* @ring: radeon_ring structure holding ring information |
* |
* Test a simple IB in the DMA ring (r6xx-SI). |
* Returns 0 on success, error on failure. |
*/ |
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) |
{ |
struct radeon_ib ib; |
unsigned i; |
int r; |
void __iomem *ptr = (void *)rdev->vram_scratch.ptr; |
u32 tmp = 0; |
ENTER(); |
if (!ptr) { |
DRM_ERROR("invalid vram scratch pointer\n"); |
return -EINVAL; |
} |
tmp = 0xCAFEDEAD; |
writel(tmp, ptr); |
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); |
if (r) { |
DRM_ERROR("radeon: failed to get ib (%d).\n", r); |
return r; |
} |
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); |
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; |
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; |
ib.ptr[3] = 0xDEADBEEF; |
ib.length_dw = 4; |
r = radeon_ib_schedule(rdev, &ib, NULL); |
if (r) { |
radeon_ib_free(rdev, &ib); |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
return r; |
} |
r = radeon_fence_wait(ib.fence, false); |
if (r) { |
DRM_ERROR("radeon: fence wait failed (%d).\n", r); |
return r; |
} |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = readl(ptr); |
if (tmp == 0xDEADBEEF) |
break; |
DRM_UDELAY(1); |
} |
if (i < rdev->usec_timeout) { |
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); |
} else { |
DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); |
r = -EINVAL; |
} |
radeon_ib_free(rdev, &ib); |
LEAVE(); |
return r; |
} |
/** |
* r600_dma_ring_ib_execute - Schedule an IB on the DMA engine |
* |
* @rdev: radeon_device pointer |
* @ib: IB object to schedule |
* |
* Schedule an IB in the DMA ring (r6xx-r7xx). |
*/ |
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
struct radeon_ring *ring = &rdev->ring[ib->ring]; |
if (rdev->wb.enabled) { |
u32 next_rptr = ring->wptr + 4; |
while ((next_rptr & 7) != 5) |
next_rptr++; |
next_rptr += 3; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); |
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); |
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); |
radeon_ring_write(ring, next_rptr); |
} |
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. |
* Pad as necessary with NOPs. |
*/ |
while ((ring->wptr & 7) != 5) |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); |
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); |
radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF)); |
} |
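/* Illustration (not part of the driver): the INDIRECT_BUFFER packet is |
 * 3 dwords, so it must start at (wptr & 7) == 5 to end on an 8-dword |
 * boundary.  With wptr == 16, for example, the loop above emits 5 NOPs |
 * (offsets 16..20), the IB packet occupies 21..23 and the next packet |
 * starts 8-dword aligned at 24.  The writeback preamble is sized the |
 * same way: next_rptr = wptr + 4 is rounded up to the next value with |
 * (next_rptr & 7) == 5, then + 3 points it just past the IB packet that |
 * follows the 4-dword WRITE. |
 */ |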
/* |
* Interrupts |
* |
2341,6 → 2853,8 |
u32 tmp; |
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
WREG32(DMA_CNTL, tmp); |
WREG32(GRBM_INT_CNTL, 0); |
WREG32(DxMODE_INT_MASK, 0); |
WREG32(D1GRPH_INTERRUPT_CONTROL, 0); |
2469,6 → 2983,7 |
u32 grbm_int_cntl = 0; |
u32 hdmi0, hdmi1; |
u32 d1grph = 0, d2grph = 0; |
u32 dma_cntl; |
if (!rdev->irq.installed) { |
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
2503,6 → 3018,7 |
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; |
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; |
} |
dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; |
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { |
DRM_DEBUG("r600_irq_set: sw int\n"); |
2509,6 → 3025,12 |
cp_int_cntl |= RB_INT_ENABLE; |
cp_int_cntl |= TIME_STAMP_INT_ENABLE; |
} |
if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { |
DRM_DEBUG("r600_irq_set: sw int dma\n"); |
dma_cntl |= TRAP_ENABLE; |
} |
if (rdev->irq.crtc_vblank_int[0] || |
atomic_read(&rdev->irq.pflip[0])) { |
DRM_DEBUG("r600_irq_set: vblank 0\n"); |
2553,6 → 3075,7 |
} |
WREG32(CP_INT_CNTL, cp_int_cntl); |
WREG32(DMA_CNTL, dma_cntl); |
WREG32(DxMODE_INT_MASK, mode_int); |
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); |
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); |
2698,6 → 3221,15 |
} |
} |
void r600_irq_disable(struct radeon_device *rdev) |
{ |
r600_disable_interrupts(rdev); |
/* Wait and acknowledge irq */ |
mdelay(1); |
r600_irq_ack(rdev); |
r600_disable_interrupt_state(rdev); |
} |
static u32 r600_get_ih_wptr(struct radeon_device *rdev) |
{ |
u32 wptr, tmp; |
2925,6 → 3457,10 |
DRM_DEBUG("IH: CP EOP\n"); |
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); |
break; |
case 224: /* DMA trap event */ |
DRM_DEBUG("IH: DMA trap\n"); |
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); |
break; |
case 233: /* GUI IDLE */ |
DRM_DEBUG("IH: GUI idle\n"); |
break; |
/drivers/video/drm/radeon/r600_audio.c |
---|
23,7 → 23,7 |
* |
* Authors: Christian König |
*/ |
#include "drmP.h" |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_reg.h" |
#include "radeon_asic.h" |
/drivers/video/drm/radeon/r600_blit_shaders.c |
---|
24,6 → 24,7 |
* Alex Deucher <alexander.deucher@amd.com> |
*/ |
#include <linux/bug.h> |
#include <linux/types.h> |
#include <linux/kernel.h> |
/drivers/video/drm/radeon/r600_reg.h |
---|
96,6 → 96,15 |
#define R600_CONFIG_F0_BASE 0x542C |
#define R600_CONFIG_APER_SIZE 0x5430 |
#define R600_BIF_FB_EN 0x5490 |
#define R600_FB_READ_EN (1 << 0) |
#define R600_FB_WRITE_EN (1 << 1) |
#define R600_CITF_CNTL 0x200c |
#define R600_BLACKOUT_MASK 0x00000003 |
#define R700_MC_CITF_CNTL 0x25c0 |
#define R600_ROM_CNTL 0x1600 |
# define R600_SCK_OVERWRITE (1 << 1) |
# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28 |
/drivers/video/drm/radeon/r600d.h |
---|
590,9 → 590,59 |
#define WAIT_2D_IDLECLEAN_bit (1 << 16) |
#define WAIT_3D_IDLECLEAN_bit (1 << 17) |
/* async DMA */ |
#define DMA_TILING_CONFIG 0x3ec4 |
#define DMA_CONFIG 0x3e4c |
#define DMA_RB_CNTL 0xd000 |
# define DMA_RB_ENABLE (1 << 0) |
# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ |
# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ |
# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) |
# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ |
# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ |
#define DMA_RB_BASE 0xd004 |
#define DMA_RB_RPTR 0xd008 |
#define DMA_RB_WPTR 0xd00c |
#define DMA_RB_RPTR_ADDR_HI 0xd01c |
#define DMA_RB_RPTR_ADDR_LO 0xd020 |
#define DMA_IB_CNTL 0xd024 |
# define DMA_IB_ENABLE (1 << 0) |
# define DMA_IB_SWAP_ENABLE (1 << 4) |
#define DMA_IB_RPTR 0xd028 |
#define DMA_CNTL 0xd02c |
# define TRAP_ENABLE (1 << 0) |
# define SEM_INCOMPLETE_INT_ENABLE (1 << 1) |
# define SEM_WAIT_INT_ENABLE (1 << 2) |
# define DATA_SWAP_ENABLE (1 << 3) |
# define FENCE_SWAP_ENABLE (1 << 4) |
# define CTXEMPTY_INT_ENABLE (1 << 28) |
#define DMA_STATUS_REG 0xd034 |
# define DMA_IDLE (1 << 0) |
#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044 |
#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048 |
#define DMA_MODE 0xd0bc |
/* async DMA packets */ |
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ |
(((t) & 0x1) << 23) | \ |
(((s) & 0x1) << 22) | \ |
(((n) & 0xFFFF) << 0)) |
/* async DMA Packet types */ |
#define DMA_PACKET_WRITE 0x2 |
#define DMA_PACKET_COPY 0x3 |
#define DMA_PACKET_INDIRECT_BUFFER 0x4 |
#define DMA_PACKET_SEMAPHORE 0x5 |
#define DMA_PACKET_FENCE 0x6 |
#define DMA_PACKET_TRAP 0x7 |
#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */ |
#define DMA_PACKET_NOP 0xf |
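/* Example (illustration only): DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1), |
 * as used by r600_dma_ring_test(), expands to (0x2 << 28) | 1 = |
 * 0x20000001 - the opcode in bits [31:28], the t and s flag bits at 23 |
 * and 22 (both zero here), and the payload dword count (1, the single |
 * 0xDEADBEEF write) in bits [15:0]. |
 */ |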
#define IH_RB_CNTL 0x3e00 |
# define IH_RB_ENABLE (1 << 0) |
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ |
# define IH_RB_SIZE(x) ((x) << 1) /* log2 */ |
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6) |
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8) |
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ |
637,7 → 687,9 |
#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20 |
#define SRBM_SOFT_RESET 0xe60 |
# define SOFT_RESET_DMA (1 << 12) |
# define SOFT_RESET_RLC (1 << 13) |
# define RV770_SOFT_RESET_DMA (1 << 20) |
#define CP_INT_CNTL 0xc124 |
# define CNTX_BUSY_INT_ENABLE (1 << 19) |
1134,6 → 1186,38 |
#define PACKET3_WAIT_REG_MEM 0x3C |
#define PACKET3_MEM_WRITE 0x3D |
#define PACKET3_INDIRECT_BUFFER 0x32 |
#define PACKET3_CP_DMA 0x41 |
/* 1. header |
* 2. SRC_ADDR_LO [31:0] |
* 3. CP_SYNC [31] | SRC_ADDR_HI [7:0] |
* 4. DST_ADDR_LO [31:0] |
* 5. DST_ADDR_HI [7:0] |
* 6. COMMAND [29:22] | BYTE_COUNT [20:0] |
*/ |
# define PACKET3_CP_DMA_CP_SYNC (1 << 31) |
/* COMMAND */ |
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) |
/* 0 - none |
* 1 - 8 in 16 |
* 2 - 8 in 32 |
* 3 - 8 in 64 |
*/ |
# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24) |
/* 0 - none |
* 1 - 8 in 16 |
* 2 - 8 in 32 |
* 3 - 8 in 64 |
*/ |
# define PACKET3_CP_DMA_CMD_SAS (1 << 26) |
/* 0 - memory |
* 1 - register |
*/ |
# define PACKET3_CP_DMA_CMD_DAS (1 << 27) |
/* 0 - memory |
* 1 - register |
*/ |
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28) |
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29) |
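/* Illustration (not part of the header): per the layout above, a CP_DMA |
 * body copying 4 KiB between two addresses could look like |
 *   dw2  src_addr_lo |
 *   dw3  PACKET3_CP_DMA_CP_SYNC | (src_addr_hi & 0xff) |
 *   dw4  dst_addr_lo |
 *   dw5  dst_addr_hi & 0xff |
 *   dw6  command | 4096            (byte count in [20:0]) |
 * with "command" ORing in the SAS/DAS/swap bits defined above as needed. |
 */ |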
#define PACKET3_SURFACE_SYNC 0x43 |
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) |
# define PACKET3_TC_ACTION_ENA (1 << 23) |
/drivers/video/drm/radeon/radeon.h |
---|
129,13 → 129,7 |
out32((u32)addr, b); |
} |
//struct __wait_queue_head { |
// spinlock_t lock; |
// struct list_head task_list; |
//}; |
//typedef struct __wait_queue_head wait_queue_head_t; |
/* |
* Copy from radeon_drv.h so we don't have to include both and have conflicting |
* symbol; |
149,7 → 143,7 |
#define RADEON_BIOS_NUM_SCRATCH 8 |
/* max number of rings */ |
#define RADEON_NUM_RINGS 3 |
#define RADEON_NUM_RINGS 5 |
/* fence seq are set to this number when signaled */ |
#define RADEON_FENCE_SIGNALED_SEQ 0LL |
162,11 → 156,21 |
#define CAYMAN_RING_TYPE_CP1_INDEX 1 |
#define CAYMAN_RING_TYPE_CP2_INDEX 2 |
/* R600+ has an async dma ring */ |
#define R600_RING_TYPE_DMA_INDEX 3 |
/* cayman add a second async dma ring */ |
#define CAYMAN_RING_TYPE_DMA1_INDEX 4 |
/* hardcode those limit for now */ |
#define RADEON_VA_IB_OFFSET (1 << 20) |
#define RADEON_VA_RESERVED_SIZE (8 << 20) |
#define RADEON_IB_VM_MAX_SIZE (64 << 10) |
/* reset flags */ |
#define RADEON_RESET_GFX (1 << 0) |
#define RADEON_RESET_COMPUTE (1 << 1) |
#define RADEON_RESET_DMA (1 << 2) |
/* |
* Errata workarounds. |
*/ |
260,12 → 264,13 |
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); |
int radeon_fence_driver_init(struct radeon_device *rdev); |
void radeon_fence_driver_fini(struct radeon_device *rdev); |
void radeon_fence_driver_force_completion(struct radeon_device *rdev); |
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); |
void radeon_fence_process(struct radeon_device *rdev, int ring); |
bool radeon_fence_signaled(struct radeon_fence *fence); |
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); |
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); |
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); |
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); |
int radeon_fence_wait_any(struct radeon_device *rdev, |
struct radeon_fence **fences, |
bool intr); |
353,6 → 358,7 |
struct list_head list; |
/* Protected by tbo.reserved */ |
u32 placements[3]; |
u32 busy_placements[3]; |
struct ttm_placement placement; |
struct ttm_buffer_object tbo; |
struct ttm_bo_kmap_obj kmap; |
817,6 → 823,15 |
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); |
/* r600 async dma */ |
void r600_dma_stop(struct radeon_device *rdev); |
int r600_dma_resume(struct radeon_device *rdev); |
void r600_dma_fini(struct radeon_device *rdev); |
void cayman_dma_stop(struct radeon_device *rdev); |
int cayman_dma_resume(struct radeon_device *rdev); |
void cayman_dma_fini(struct radeon_device *rdev); |
/* |
* CS. |
*/ |
854,6 → 869,7 |
struct radeon_cs_reloc *relocs; |
struct radeon_cs_reloc **relocs_ptr; |
struct list_head validated; |
unsigned dma_reloc_idx; |
/* indices of various chunks */ |
int chunk_ib_idx; |
int chunk_relocs_idx; |
913,7 → 929,9 |
#define RADEON_WB_CP_RPTR_OFFSET 1024 |
#define RADEON_WB_CP1_RPTR_OFFSET 1280 |
#define RADEON_WB_CP2_RPTR_OFFSET 1536 |
#define R600_WB_DMA_RPTR_OFFSET 1792 |
#define R600_WB_IH_WPTR_OFFSET 2048 |
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304 |
#define R600_WB_EVENT_OFFSET 3072 |
/** |
1458,6 → 1476,8 |
/* Register mmio */ |
resource_size_t rmmio_base; |
resource_size_t rmmio_size; |
/* protects concurrent MM_INDEX/DATA based register access */ |
spinlock_t mmio_idx_lock; |
void __iomem *rmmio; |
radeon_rreg_t mc_rreg; |
radeon_wreg_t mc_wreg; |
1533,8 → 1553,10 |
void radeon_device_fini(struct radeon_device *rdev); |
int radeon_gpu_wait_for_idle(struct radeon_device *rdev); |
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); |
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, |
bool always_indirect); |
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, |
bool always_indirect); |
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); |
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
1550,9 → 1572,11 |
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg)) |
#define RREG16(reg) readw((rdev->rmmio) + (reg)) |
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg)) |
#define RREG32(reg) r100_mm_rreg(rdev, (reg)) |
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) |
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) |
#define RREG32(reg) r100_mm_rreg(rdev, (reg), false) |
#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true) |
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false)) |
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false) |
#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true) |
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) |
1873,4 → 1897,6 |
#define radeon_ttm_set_active_vram_size(a, b) |
#endif |
/drivers/video/drm/radeon/radeon_agp.c |
---|
24,10 → 24,9 |
* Dave Airlie |
* Jerome Glisse <glisse@freedesktop.org> |
*/ |
#include "drmP.h" |
#include "drm.h" |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_drm.h" |
#include <drm/radeon_drm.h> |
#if __OS_HAS_AGP |
70,9 → 69,12 |
/* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ |
{ PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, |
PCI_VENDOR_ID_DELL, 0x00e3, 2}, |
/* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ |
/* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ |
{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, |
PCI_VENDOR_ID_DELL, 0x0149, 1}, |
/* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */ |
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, |
PCI_VENDOR_ID_IBM, 0x0531, 1}, |
/* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ |
{ PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, |
0x1025, 0x0061, 1}, |
/drivers/video/drm/radeon/radeon_asic.c |
---|
171,7 → 171,7 |
// .resume = &r100_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &r100_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r100_mc_wait_for_idle, |
.gart = { |
228,9 → 228,9 |
// .get_engine_clock = &radeon_legacy_get_engine_clock, |
// .set_engine_clock = &radeon_legacy_set_engine_clock, |
// .get_memory_clock = &radeon_legacy_get_memory_clock, |
// .set_memory_clock = NULL, |
// .get_pcie_lanes = NULL, |
// .set_pcie_lanes = NULL, |
.set_memory_clock = NULL, |
.get_pcie_lanes = NULL, |
.set_pcie_lanes = NULL, |
// .set_clock_gating = &radeon_legacy_set_clock_gating, |
}, |
.pflip = { |
247,7 → 247,7 |
// .resume = &r100_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &r100_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r100_mc_wait_for_idle, |
.gart = { |
304,9 → 304,9 |
// .get_engine_clock = &radeon_legacy_get_engine_clock, |
// .set_engine_clock = &radeon_legacy_set_engine_clock, |
// .get_memory_clock = &radeon_legacy_get_memory_clock, |
// .set_memory_clock = NULL, |
// .get_pcie_lanes = NULL, |
// .set_pcie_lanes = NULL, |
.set_memory_clock = NULL, |
.get_pcie_lanes = NULL, |
.set_pcie_lanes = NULL, |
// .set_clock_gating = &radeon_legacy_set_clock_gating, |
}, |
.pflip = { |
323,7 → 323,7 |
// .resume = &r300_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &r300_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r300_mc_wait_for_idle, |
.gart = { |
399,7 → 399,7 |
// .resume = &r300_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &r300_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r300_mc_wait_for_idle, |
.gart = { |
475,7 → 475,7 |
// .resume = &r420_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &r300_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r300_mc_wait_for_idle, |
.gart = { |
551,7 → 551,7 |
// .resume = &rs400_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &r300_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &rs400_mc_wait_for_idle, |
.gart = { |
627,7 → 627,7 |
// .resume = &rs600_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &rs600_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &rs600_mc_wait_for_idle, |
.gart = { |
703,7 → 703,7 |
// .resume = &rs690_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &rs600_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &rs690_mc_wait_for_idle, |
.gart = { |
779,7 → 779,7 |
// .resume = &rv515_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &rs600_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &rv515_mc_wait_for_idle, |
.gart = { |
855,7 → 855,7 |
// .resume = &r520_resume, |
// .vga_set_state = &r100_vga_set_state, |
.asic_reset = &rs600_asic_reset, |
// .ioctl_wait_idle = NULL, |
.ioctl_wait_idle = NULL, |
.gui_idle = &r100_gui_idle, |
.mc_wait_for_idle = &r520_mc_wait_for_idle, |
.gart = { |
947,6 → 947,15 |
.ring_test = &r600_ring_test, |
.ib_test = &r600_ib_test, |
.is_lockup = &r600_gpu_is_lockup, |
}, |
[R600_RING_TYPE_DMA_INDEX] = { |
.ib_execute = &r600_dma_ring_ib_execute, |
.emit_fence = &r600_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &r600_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &r600_dma_is_lockup, |
} |
}, |
.irq = { |
963,10 → 972,10 |
.copy = { |
.blit = &r600_copy_blit, |
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = NULL, |
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.copy = &r600_copy_blit, |
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = &r600_copy_dma, |
.dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
.copy = &r600_copy_dma, |
.copy_ring_index = R600_RING_TYPE_DMA_INDEX, |
}, |
.surface = { |
.set_reg = r600_set_surface_reg, |
1022,6 → 1031,15 |
.ring_test = &r600_ring_test, |
.ib_test = &r600_ib_test, |
.is_lockup = &r600_gpu_is_lockup, |
}, |
[R600_RING_TYPE_DMA_INDEX] = { |
.ib_execute = &r600_dma_ring_ib_execute, |
.emit_fence = &r600_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &r600_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &r600_dma_is_lockup, |
} |
}, |
.irq = { |
1038,10 → 1056,10 |
.copy = { |
.blit = &r600_copy_blit, |
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = NULL, |
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.copy = &r600_copy_blit, |
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = &r600_copy_dma, |
.dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
.copy = &r600_copy_dma, |
.copy_ring_index = R600_RING_TYPE_DMA_INDEX, |
}, |
.surface = { |
.set_reg = r600_set_surface_reg, |
1097,6 → 1115,15 |
.ring_test = &r600_ring_test, |
.ib_test = &r600_ib_test, |
.is_lockup = &r600_gpu_is_lockup, |
}, |
[R600_RING_TYPE_DMA_INDEX] = { |
.ib_execute = &r600_dma_ring_ib_execute, |
.emit_fence = &r600_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &r600_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &r600_dma_is_lockup, |
} |
}, |
.irq = { |
1113,10 → 1140,10 |
.copy = { |
.blit = &r600_copy_blit, |
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = NULL, |
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.copy = &r600_copy_blit, |
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = &r600_copy_dma, |
.dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
.copy = &r600_copy_dma, |
.copy_ring_index = R600_RING_TYPE_DMA_INDEX, |
}, |
.surface = { |
.set_reg = r600_set_surface_reg, |
1172,6 → 1199,15 |
.ring_test = &r600_ring_test, |
.ib_test = &r600_ib_test, |
.is_lockup = &evergreen_gpu_is_lockup, |
}, |
[R600_RING_TYPE_DMA_INDEX] = { |
.ib_execute = &evergreen_dma_ring_ib_execute, |
.emit_fence = &evergreen_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &evergreen_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &r600_dma_is_lockup, |
} |
}, |
.irq = { |
1188,10 → 1224,10 |
.copy = { |
.blit = &r600_copy_blit, |
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = NULL, |
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.copy = &r600_copy_blit, |
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = &evergreen_copy_dma, |
.dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
.copy = &evergreen_copy_dma, |
.copy_ring_index = R600_RING_TYPE_DMA_INDEX, |
}, |
.surface = { |
.set_reg = r600_set_surface_reg, |
1248,6 → 1284,15 |
.ib_test = &r600_ib_test, |
.is_lockup = &evergreen_gpu_is_lockup, |
}, |
[R600_RING_TYPE_DMA_INDEX] = { |
.ib_execute = &evergreen_dma_ring_ib_execute, |
.emit_fence = &evergreen_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &evergreen_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &r600_dma_is_lockup, |
} |
}, |
.irq = { |
.set = &evergreen_irq_set, |
1263,10 → 1308,10 |
.copy = { |
.blit = &r600_copy_blit, |
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = NULL, |
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.copy = &r600_copy_blit, |
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = &evergreen_copy_dma, |
.dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
.copy = &evergreen_copy_dma, |
.copy_ring_index = R600_RING_TYPE_DMA_INDEX, |
}, |
.surface = { |
.set_reg = r600_set_surface_reg, |
1322,6 → 1367,15 |
.ring_test = &r600_ring_test, |
.ib_test = &r600_ib_test, |
.is_lockup = &evergreen_gpu_is_lockup, |
}, |
[R600_RING_TYPE_DMA_INDEX] = { |
.ib_execute = &evergreen_dma_ring_ib_execute, |
.emit_fence = &evergreen_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &evergreen_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &r600_dma_is_lockup, |
} |
}, |
.irq = { |
1338,10 → 1392,10 |
.copy = { |
.blit = &r600_copy_blit, |
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = NULL, |
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.copy = &r600_copy_blit, |
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = &evergreen_copy_dma, |
.dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
.copy = &evergreen_copy_dma, |
.copy_ring_index = R600_RING_TYPE_DMA_INDEX, |
}, |
.surface = { |
.set_reg = r600_set_surface_reg, |
1391,7 → 1445,7 |
.vm = { |
.init = &cayman_vm_init, |
.fini = &cayman_vm_fini, |
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.pt_ring_index = R600_RING_TYPE_DMA_INDEX, |
.set_page = &cayman_vm_set_page, |
}, |
.ring = { |
1427,6 → 1481,28 |
.ib_test = &r600_ib_test, |
.is_lockup = &evergreen_gpu_is_lockup, |
.vm_flush = &cayman_vm_flush, |
}, |
[R600_RING_TYPE_DMA_INDEX] = { |
.ib_execute = &cayman_dma_ring_ib_execute, |
// .ib_parse = &evergreen_dma_ib_parse, |
.emit_fence = &evergreen_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &evergreen_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &cayman_dma_is_lockup, |
.vm_flush = &cayman_dma_vm_flush, |
}, |
[CAYMAN_RING_TYPE_DMA1_INDEX] = { |
.ib_execute = &cayman_dma_ring_ib_execute, |
// .ib_parse = &evergreen_dma_ib_parse, |
.emit_fence = &evergreen_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &evergreen_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &cayman_dma_is_lockup, |
.vm_flush = &cayman_dma_vm_flush, |
} |
}, |
.irq = { |
1443,10 → 1519,10 |
.copy = { |
.blit = &r600_copy_blit, |
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = NULL, |
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.copy = &r600_copy_blit, |
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = &evergreen_copy_dma, |
.dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
.copy = &evergreen_copy_dma, |
.copy_ring_index = R600_RING_TYPE_DMA_INDEX, |
}, |
.surface = { |
.set_reg = r600_set_surface_reg, |
1496,7 → 1572,7 |
.vm = { |
.init = &cayman_vm_init, |
.fini = &cayman_vm_fini, |
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.pt_ring_index = R600_RING_TYPE_DMA_INDEX, |
.set_page = &cayman_vm_set_page, |
}, |
.ring = { |
1532,6 → 1608,28 |
.ib_test = &r600_ib_test, |
.is_lockup = &evergreen_gpu_is_lockup, |
.vm_flush = &cayman_vm_flush, |
}, |
[R600_RING_TYPE_DMA_INDEX] = { |
.ib_execute = &cayman_dma_ring_ib_execute, |
// .ib_parse = &evergreen_dma_ib_parse, |
.emit_fence = &evergreen_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &evergreen_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &cayman_dma_is_lockup, |
.vm_flush = &cayman_dma_vm_flush, |
}, |
[CAYMAN_RING_TYPE_DMA1_INDEX] = { |
.ib_execute = &cayman_dma_ring_ib_execute, |
// .ib_parse = &evergreen_dma_ib_parse, |
.emit_fence = &evergreen_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
// .cs_parse = &evergreen_dma_cs_parse, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &cayman_dma_is_lockup, |
.vm_flush = &cayman_dma_vm_flush, |
} |
}, |
.irq = { |
1548,10 → 1646,10 |
.copy = { |
.blit = &r600_copy_blit, |
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = NULL, |
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.copy = &r600_copy_blit, |
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = &evergreen_copy_dma, |
.dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
.copy = &evergreen_copy_dma, |
.copy_ring_index = R600_RING_TYPE_DMA_INDEX, |
}, |
.surface = { |
.set_reg = r600_set_surface_reg, |
1601,7 → 1699,7 |
.vm = { |
.init = &si_vm_init, |
.fini = &si_vm_fini, |
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.pt_ring_index = R600_RING_TYPE_DMA_INDEX, |
.set_page = &si_vm_set_page, |
}, |
.ring = { |
1637,6 → 1735,28 |
.ib_test = &r600_ib_test, |
.is_lockup = &si_gpu_is_lockup, |
.vm_flush = &si_vm_flush, |
}, |
[R600_RING_TYPE_DMA_INDEX] = { |
.ib_execute = &cayman_dma_ring_ib_execute, |
// .ib_parse = &evergreen_dma_ib_parse, |
.emit_fence = &evergreen_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
.cs_parse = NULL, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &cayman_dma_is_lockup, |
.vm_flush = &si_dma_vm_flush, |
}, |
[CAYMAN_RING_TYPE_DMA1_INDEX] = { |
.ib_execute = &cayman_dma_ring_ib_execute, |
// .ib_parse = &evergreen_dma_ib_parse, |
.emit_fence = &evergreen_dma_fence_ring_emit, |
.emit_semaphore = &r600_dma_semaphore_ring_emit, |
.cs_parse = NULL, |
.ring_test = &r600_dma_ring_test, |
.ib_test = &r600_dma_ib_test, |
.is_lockup = &cayman_dma_is_lockup, |
.vm_flush = &si_dma_vm_flush, |
} |
}, |
.irq = { |
1653,10 → 1773,10 |
.copy = { |
.blit = NULL, |
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = NULL, |
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.copy = NULL, |
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, |
.dma = &si_copy_dma, |
.dma_ring_index = R600_RING_TYPE_DMA_INDEX, |
.copy = &si_copy_dma, |
.copy_ring_index = R600_RING_TYPE_DMA_INDEX, |
}, |
.surface = { |
.set_reg = r600_set_surface_reg, |
/drivers/video/drm/radeon/radeon_asic.h |
---|
263,6 → 263,7 |
struct rv515_mc_save { |
u32 vga_render_control; |
u32 vga_hdp_control; |
bool crtc_enabled[2]; |
}; |
int rv515_init(struct radeon_device *rdev); |
303,6 → 304,7 |
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg); |
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
int r600_cs_parse(struct radeon_cs_parser *p); |
int r600_dma_cs_parse(struct radeon_cs_parser *p); |
void r600_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence); |
void r600_semaphore_ring_emit(struct radeon_device *rdev, |
309,6 → 311,14 |
struct radeon_ring *cp, |
struct radeon_semaphore *semaphore, |
bool emit_wait); |
void r600_dma_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence); |
void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, |
struct radeon_ring *ring, |
struct radeon_semaphore *semaphore, |
bool emit_wait); |
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); |
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); |
int r600_asic_reset(struct radeon_device *rdev); |
int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
316,11 → 326,16 |
uint32_t offset, uint32_t obj_size); |
void r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
int r600_copy_blit(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, struct radeon_fence **fence); |
int r600_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, struct radeon_fence **fence); |
void r600_hpd_init(struct radeon_device *rdev); |
void r600_hpd_fini(struct radeon_device *rdev); |
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
388,6 → 403,10 |
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); |
void r700_cp_stop(struct radeon_device *rdev); |
void r700_cp_fini(struct radeon_device *rdev); |
int rv770_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
/* |
* evergreen |
416,6 → 435,7 |
int evergreen_irq_set(struct radeon_device *rdev); |
int evergreen_irq_process(struct radeon_device *rdev); |
extern int evergreen_cs_parse(struct radeon_cs_parser *p); |
extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p); |
extern void evergreen_pm_misc(struct radeon_device *rdev); |
extern void evergreen_pm_prepare(struct radeon_device *rdev); |
extern void evergreen_pm_finish(struct radeon_device *rdev); |
428,6 → 448,14 |
void evergreen_disable_interrupt_state(struct radeon_device *rdev); |
int evergreen_blit_init(struct radeon_device *rdev); |
int evergreen_mc_wait_for_idle(struct radeon_device *rdev); |
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence); |
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, |
struct radeon_ib *ib); |
int evergreen_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
/* |
* cayman |
449,6 → 477,11 |
uint64_t addr, unsigned count, |
uint32_t incr, uint32_t flags); |
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
void cayman_dma_ring_ib_execute(struct radeon_device *rdev, |
struct radeon_ib *ib); |
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); |
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
/* DCE6 - SI */ |
void dce6_bandwidth_update(struct radeon_device *rdev); |
476,5 → 509,10 |
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); |
uint64_t si_get_gpu_clock(struct radeon_device *rdev); |
int si_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
#endif |
/drivers/video/drm/radeon/radeon_combios.c |
---|
1548,6 → 1548,9 |
of_machine_is_compatible("PowerBook6,7")) { |
/* ibook */ |
rdev->mode_info.connector_table = CT_IBOOK; |
} else if (of_machine_is_compatible("PowerMac3,5")) { |
/* PowerMac G4 Silver radeon 7500 */ |
rdev->mode_info.connector_table = CT_MAC_G4_SILVER; |
} else if (of_machine_is_compatible("PowerMac4,4")) { |
/* emac */ |
rdev->mode_info.connector_table = CT_EMAC; |
2212,6 → 2215,54 |
CONNECTOR_OBJECT_ID_SVIDEO, |
&hpd); |
break; |
case CT_MAC_G4_SILVER: |
DRM_INFO("Connector Table: %d (mac g4 silver)\n", |
rdev->mode_info.connector_table); |
/* DVI-I - tv dac, int tmds */ |
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0); |
hpd.hpd = RADEON_HPD_1; /* ??? */ |
radeon_add_legacy_encoder(dev, |
radeon_get_encoder_enum(dev, |
ATOM_DEVICE_DFP1_SUPPORT, |
0), |
ATOM_DEVICE_DFP1_SUPPORT); |
radeon_add_legacy_encoder(dev, |
radeon_get_encoder_enum(dev, |
ATOM_DEVICE_CRT2_SUPPORT, |
2), |
ATOM_DEVICE_CRT2_SUPPORT); |
radeon_add_legacy_connector(dev, 0, |
ATOM_DEVICE_DFP1_SUPPORT | |
ATOM_DEVICE_CRT2_SUPPORT, |
DRM_MODE_CONNECTOR_DVII, &ddc_i2c, |
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, |
&hpd); |
/* VGA - primary dac */ |
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0); |
hpd.hpd = RADEON_HPD_NONE; |
radeon_add_legacy_encoder(dev, |
radeon_get_encoder_enum(dev, |
ATOM_DEVICE_CRT1_SUPPORT, |
1), |
ATOM_DEVICE_CRT1_SUPPORT); |
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, |
DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
CONNECTOR_OBJECT_ID_VGA, |
&hpd); |
/* TV - TV DAC */ |
ddc_i2c.valid = false; |
hpd.hpd = RADEON_HPD_NONE; |
radeon_add_legacy_encoder(dev, |
radeon_get_encoder_enum(dev, |
ATOM_DEVICE_TV1_SUPPORT, |
2), |
ATOM_DEVICE_TV1_SUPPORT); |
radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
DRM_MODE_CONNECTOR_SVIDEO, |
&ddc_i2c, |
CONNECTOR_OBJECT_ID_SVIDEO, |
&hpd); |
break; |
default: |
DRM_INFO("Connector table: %d (invalid)\n", |
rdev->mode_info.connector_table); |
3246,11 → 3297,9 |
while (ram--) { |
addr = ram * 1024 * 1024; |
/* write to each page */ |
WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER); |
WREG32(RADEON_MM_DATA, 0xdeadbeef); |
WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef); |
/* read back and verify */ |
WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER); |
if (RREG32(RADEON_MM_DATA) != 0xdeadbeef) |
if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef) |
return 0; |
} |
/drivers/video/drm/radeon/radeon_connectors.c |
---|
31,6 → 31,9 |
#include "radeon.h" |
#include "atom.h" |
#define DISABLE_DP 0 |
extern void |
radeon_combios_connected_scratch_regs(struct drm_connector *connector, |
struct drm_encoder *encoder, |
612,6 → 615,8 |
struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
enum drm_connector_status ret = connector_status_disconnected; |
ENTER(); |
if (encoder) { |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct drm_display_mode *native_mode = &radeon_encoder->native_mode; |
636,6 → 641,7 |
/* check acpi lid status ??? */ |
radeon_connector_update_scratch_regs(connector, ret); |
LEAVE(); |
return ret; |
} |
741,7 → 747,7 |
ret = connector_status_disconnected; |
if (radeon_connector->ddc_bus) |
dret = radeon_ddc_probe(radeon_connector); |
dret = radeon_ddc_probe(radeon_connector, false); |
if (dret) { |
radeon_connector->detected_by_load = false; |
if (radeon_connector->edid) { |
948,7 → 954,7 |
return connector->status; |
if (radeon_connector->ddc_bus) |
dret = radeon_ddc_probe(radeon_connector); |
dret = radeon_ddc_probe(radeon_connector, false); |
if (dret) { |
radeon_connector->detected_by_load = false; |
if (radeon_connector->edid) { |
1364,6 → 1370,14 |
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; |
struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
dbgprintf("%s radeon_connector %p encoder %p\n", |
__FUNCTION__, radeon_connector, encoder); |
#if DISABLE_DP |
connector->status = connector_status_disconnected; |
return connector->status; |
#endif |
if (!force && radeon_check_hpd_status_unchanged(connector)) |
return connector->status; |
1387,6 → 1401,7 |
if (!radeon_dig_connector->edp_on) |
atombios_set_edp_panel_power(connector, |
ATOM_TRANSMITTER_ACTION_POWER_ON); |
dbgprintf("check eDP\n"); |
if (radeon_dp_getdpcd(radeon_connector)) |
ret = connector_status_connected; |
if (!radeon_dig_connector->edp_on) |
1402,7 → 1417,8 |
if (encoder) { |
/* setup ddc on the bridge */ |
radeon_atom_ext_encoder_setup_ddc(encoder); |
if (radeon_ddc_probe(radeon_connector)) /* try DDC */ |
/* bridge chips are always aux */ |
if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */ |
ret = connector_status_connected; |
else if (radeon_connector->dac_load_detect) { /* try load detection */ |
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
1420,7 → 1436,8 |
if (radeon_dp_getdpcd(radeon_connector)) |
ret = connector_status_connected; |
} else { |
if (radeon_ddc_probe(radeon_connector)) |
/* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */ |
if (radeon_ddc_probe(radeon_connector, false)) |
ret = connector_status_connected; |
} |
} |
1427,6 → 1444,7 |
} |
radeon_connector_update_scratch_regs(connector, ret); |
LEAVE(); |
return ret; |
} |
1600,7 → 1618,7 |
connector->interlace_allowed = true; |
connector->doublescan_allowed = true; |
radeon_connector->dac_load_detect = true; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
1); |
break; |
1609,13 → 1627,13 |
case DRM_MODE_CONNECTOR_HDMIA: |
case DRM_MODE_CONNECTOR_HDMIB: |
case DRM_MODE_CONNECTOR_DisplayPort: |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_property, |
UNDERSCAN_OFF); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_hborder_property, |
0); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_vborder_property, |
0); |
subpixel_order = SubPixelHorizontalRGB; |
1626,7 → 1644,7 |
connector->doublescan_allowed = false; |
if (connector_type == DRM_MODE_CONNECTOR_DVII) { |
radeon_connector->dac_load_detect = true; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
1); |
} |
1633,7 → 1651,7 |
break; |
case DRM_MODE_CONNECTOR_LVDS: |
case DRM_MODE_CONNECTOR_eDP: |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_FULLSCREEN); |
subpixel_order = SubPixelHorizontalRGB; |
1652,7 → 1670,7 |
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
radeon_connector->dac_load_detect = true; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
1); |
/* no HPD on analog connectors */ |
1670,7 → 1688,7 |
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
radeon_connector->dac_load_detect = true; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
1); |
/* no HPD on analog connectors */ |
1693,23 → 1711,23 |
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
subpixel_order = SubPixelHorizontalRGB; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.coherent_mode_property, |
1); |
if (ASIC_IS_AVIVO(rdev)) { |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_property, |
UNDERSCAN_OFF); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_hborder_property, |
0); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_vborder_property, |
0); |
} |
if (connector_type == DRM_MODE_CONNECTOR_DVII) { |
radeon_connector->dac_load_detect = true; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
1); |
} |
1733,17 → 1751,17 |
if (!radeon_connector->ddc_bus) |
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.coherent_mode_property, |
1); |
if (ASIC_IS_AVIVO(rdev)) { |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_property, |
UNDERSCAN_OFF); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_hborder_property, |
0); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_vborder_property, |
0); |
} |
1772,17 → 1790,17 |
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
subpixel_order = SubPixelHorizontalRGB; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.coherent_mode_property, |
1); |
if (ASIC_IS_AVIVO(rdev)) { |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_property, |
UNDERSCAN_OFF); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_hborder_property, |
0); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.underscan_vborder_property, |
0); |
} |
1807,7 → 1825,7 |
if (!radeon_connector->ddc_bus) |
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_FULLSCREEN); |
subpixel_order = SubPixelHorizontalRGB; |
1820,10 → 1838,10 |
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
radeon_connector->dac_load_detect = true; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
1); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.tv_std_property, |
radeon_atombios_get_tv_info(rdev)); |
/* no HPD on analog connectors */ |
1844,7 → 1862,7 |
if (!radeon_connector->ddc_bus) |
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_FULLSCREEN); |
subpixel_order = SubPixelHorizontalRGB; |
1923,7 → 1941,7 |
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
radeon_connector->dac_load_detect = true; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
1); |
/* no HPD on analog connectors */ |
1941,7 → 1959,7 |
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
radeon_connector->dac_load_detect = true; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
1); |
/* no HPD on analog connectors */ |
1960,7 → 1978,7 |
} |
if (connector_type == DRM_MODE_CONNECTOR_DVII) { |
radeon_connector->dac_load_detect = true; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
1); |
} |
1984,10 → 2002,10 |
*/ |
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) |
radeon_connector->dac_load_detect = false; |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.load_detect_property, |
radeon_connector->dac_load_detect); |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
rdev->mode_info.tv_std_property, |
radeon_combios_get_tv_info(rdev)); |
/* no HPD on analog connectors */ |
2003,7 → 2021,7 |
if (!radeon_connector->ddc_bus) |
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); |
} |
drm_connector_attach_property(&radeon_connector->base, |
drm_object_attach_property(&radeon_connector->base.base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_FULLSCREEN); |
subpixel_order = SubPixelHorizontalRGB; |
/drivers/video/drm/radeon/radeon_device.c |
---|
1026,6 → 1026,7 |
/* Registers mapping */ |
/* TODO: block userspace mapping of io register */ |
spin_lock_init(&rdev->mmio_idx_lock); |
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); |
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); |
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); |
1051,6 → 1052,10 |
if (r) |
return r; |
// r = radeon_ib_ring_tests(rdev); |
// if (r) |
// DRM_ERROR("ib ring test failed (%d).\n", r); |
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { |
/* Acceleration not working on AGP card try again |
* with fallback to PCI or PCIE GART |
1126,15 → 1131,15 |
ring_data[i] = NULL; |
} |
r = radeon_ib_ring_tests(rdev); |
if (r) { |
dev_err(rdev->dev, "ib ring test failed (%d).\n", r); |
if (saved) { |
saved = false; |
radeon_suspend(rdev); |
goto retry; |
} |
} |
// r = radeon_ib_ring_tests(rdev); |
// if (r) { |
// dev_err(rdev->dev, "ib ring test failed (%d).\n", r); |
// if (saved) { |
// saved = false; |
// radeon_suspend(rdev); |
// goto retry; |
// } |
// } |
} else { |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
kfree(ring_data[i]); |
1152,7 → 1157,6 |
} |
/* |
* Driver load/unload |
*/ |
1434,7 → 1438,7 |
if(!dbg_open(log)) |
{ |
strcpy(log, "/RD/1/DRIVERS/atikms.log"); |
strcpy(log, "/TMP1/1/atikms.log"); |
if(!dbg_open(log)) |
{ |
1442,7 → 1446,7 |
return 0; |
}; |
} |
dbgprintf("Radeon RC12 preview 1 cmdline %s\n", cmdline); |
dbgprintf("Radeon RC13 cmdline %s\n", cmdline); |
enum_pci_devices(); |
1491,3 → 1495,13 |
} |
return ret; |
} |
unsigned int hweight32(unsigned int w) |
{ |
unsigned int res = w - ((w >> 1) & 0x55555555); |
res = (res & 0x33333333) + ((res >> 2) & 0x33333333); |
res = (res + (res >> 4)) & 0x0F0F0F0F; |
res = res + (res >> 8); |
return (res + (res >> 16)) & 0x000000FF; |
} |
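For reference, hweight32() above is the standard SWAR population count, added locally presumably because the port's environment lacks the kernel helper. A minimal worked example of the reduction stages, illustrative only:

/* counting the set bits of 0xF0F00003 (10 bits set) */
unsigned int w = 0xF0F00003;
unsigned int res = w - ((w >> 1) & 0x55555555);         /* 2-bit partial sums */
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);   /* 4-bit partial sums */
res = (res + (res >> 4)) & 0x0F0F0F0F;                  /* per-byte sums */
res = res + (res >> 8);                                  /* fold byte pairs */
res = (res + (res >> 16)) & 0x000000FF;                  /* res == 10 */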
/drivers/video/drm/radeon/radeon_display.c |
---|
448,12 → 448,17 |
if (radeon_connector->router.ddc_valid) |
radeon_router_select_ddc_port(radeon_connector); |
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || |
(radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != |
ENCODER_OBJECT_ID_NONE)) { |
if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != |
ENCODER_OBJECT_ID_NONE) { |
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
if (dig->dp_i2c_bus) |
radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
&dig->dp_i2c_bus->adapter); |
} else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { |
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || |
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus) |
radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
1038,6 → 1043,8 |
int i; |
int ret; |
ENTER(); |
drm_mode_config_init(rdev->ddev); |
rdev->mode_info.mode_config_initialized = true; |
1067,6 → 1074,8 |
/* init i2c buses */ |
radeon_i2c_init(rdev); |
dbgprintf("i2c init\n"); |
/* check combios for a valid hardcoded EDID - Sun servers */ |
if (!rdev->is_atom_bios) { |
/* check for hardcoded EDID in BIOS */ |
1078,6 → 1087,8 |
radeon_crtc_init(rdev->ddev, i); |
} |
dbgprintf("crtc init\n"); |
/* okay we should have all the bios connectors */ |
ret = radeon_setup_enc_conn(rdev->ddev); |
if (!ret) { |
1102,6 → 1113,8 |
radeon_fbdev_init(rdev); |
// drm_kms_helper_poll_init(rdev->ddev); |
LEAVE(); |
return 0; |
} |
/drivers/video/drm/radeon/radeon_fence.c |
---|
30,7 → 30,7 |
*/ |
#include <linux/seq_file.h> |
#include <asm/atomic.h> |
//#include <linux/wait.h> |
#include <linux/wait.h> |
#include <linux/list.h> |
#include <linux/kref.h> |
#include <linux/slab.h> |
303,21 → 303,19 |
// trace_radeon_fence_wait_begin(rdev->ddev, seq); |
radeon_irq_kms_sw_irq_get(rdev, ring); |
// if (intr) { |
// r = wait_event_interruptible_timeout(rdev->fence_queue, |
// (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)), |
// timeout); |
// } else { |
// r = wait_event_timeout(rdev->fence_queue, |
// (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)), |
// timeout); |
// } |
delay(1); |
if (intr) { |
r = wait_event_interruptible_timeout(rdev->fence_queue, |
(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)), |
timeout); |
} else { |
r = wait_event_timeout(rdev->fence_queue, |
(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)), |
timeout); |
} |
radeon_irq_kms_sw_irq_put(rdev, ring); |
// if (unlikely(r < 0)) { |
// return r; |
// } |
if (unlikely(r < 0)) { |
return r; |
} |
// trace_radeon_fence_wait_end(rdev->ddev, seq); |
if (unlikely(!signaled)) { |
474,11 → 472,15 |
radeon_irq_kms_sw_irq_get(rdev, i); |
} |
} |
// WaitEvent(fence->evnt); |
r = 1; |
if (intr) { |
r = wait_event_interruptible_timeout(rdev->fence_queue, |
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)), |
timeout); |
} else { |
r = wait_event_timeout(rdev->fence_queue, |
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)), |
timeout); |
} |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (target_seq[i]) { |
radeon_irq_kms_sw_irq_put(rdev, i); |
606,27 → 608,21 |
* Returns 0 if the fences have passed, error for all other cases. |
* Caller must hold ring lock. |
*/ |
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) |
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) |
{ |
uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; |
int r; |
while(1) { |
int r; |
r = radeon_fence_wait_seq(rdev, seq, ring, false, false); |
if (r) { |
if (r == -EDEADLK) { |
mutex_unlock(&rdev->ring_lock); |
r = radeon_gpu_reset(rdev); |
mutex_lock(&rdev->ring_lock); |
if (!r) |
continue; |
return -EDEADLK; |
} |
if (r) { |
dev_err(rdev->dev, "error waiting for ring to become" |
" idle (%d)\n", r); |
dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n", |
ring, r); |
} |
return; |
return 0; |
} |
} |
/** |
* radeon_fence_ref - take a ref on a fence |
769,7 → 765,7 |
int r; |
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); |
if (rdev->wb.use_event) { |
if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) { |
rdev->fence_drv[ring].scratch_reg = 0; |
index = R600_WB_EVENT_OFFSET + ring * 4; |
} else { |
851,13 → 847,17 |
*/ |
void radeon_fence_driver_fini(struct radeon_device *rdev) |
{ |
int ring; |
int ring, r; |
mutex_lock(&rdev->ring_lock); |
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { |
if (!rdev->fence_drv[ring].initialized) |
continue; |
radeon_fence_wait_empty_locked(rdev, ring); |
r = radeon_fence_wait_empty_locked(rdev, ring); |
if (r) { |
/* no need to trigger GPU reset as we are unloading */ |
radeon_fence_driver_force_completion(rdev); |
} |
wake_up_all(&rdev->fence_queue); |
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); |
rdev->fence_drv[ring].initialized = false; |
865,7 → 865,26 |
mutex_unlock(&rdev->ring_lock); |
} |
/** |
* radeon_fence_driver_force_completion - force all fence waiters to complete |
* |
* @rdev: radeon device pointer |
* |
* In case of GPU reset failure, make sure no process keeps waiting on a fence |
* that will never complete. |
*/ |
void radeon_fence_driver_force_completion(struct radeon_device *rdev) |
{ |
int ring; |
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { |
if (!rdev->fence_drv[ring].initialized) |
continue; |
radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring); |
} |
} |
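The force-completion path works because every fence wait ultimately compares its target sequence against the value read back from the ring's fence location; writing sync_seq[ring], the highest sequence ever emitted on that ring, makes that comparison succeed for every outstanding fence. A simplified sketch of the check it satisfies, with names assumed and 32-bit wrap handling omitted:

/* simplified; the driver actually tracks a 64-bit last_seq updated in
 * radeon_fence_process() to cope with the 32-bit fence value wrapping */
static bool fence_signaled_sketch(struct radeon_device *rdev, u64 seq, int ring)
{
	/* after force_completion the read-back equals sync_seq[ring],
	 * which is >= any sequence a waiter can still be blocked on */
	return (u64)radeon_fence_read(rdev, ring) >= seq;
}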
/* |
* Fence debugfs |
*/ |
/drivers/video/drm/radeon/radeon_gart.c |
---|
1248,7 → 1248,6 |
{ |
struct radeon_bo_va *bo_va; |
BUG_ON(!atomic_read(&bo->tbo.reserved)); |
list_for_each_entry(bo_va, &bo->va, bo_list) { |
bo_va->valid = false; |
} |
/drivers/video/drm/radeon/radeon_i2c.c |
---|
39,7 → 39,7 |
* radeon_ddc_probe |
* |
*/ |
bool radeon_ddc_probe(struct radeon_connector *radeon_connector) |
bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux) |
{ |
u8 out = 0x0; |
u8 buf[8]; |
63,7 → 63,13 |
if (radeon_connector->router.ddc_valid) |
radeon_router_select_ddc_port(radeon_connector); |
if (use_aux) { |
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2); |
} else { |
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); |
} |
if (ret != 2) |
/* Couldn't find an accessible DDC on this connector */ |
return false; |
922,11 → 928,11 |
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
"Radeon i2c hw bus %s", name); |
i2c->adapter.algo = &radeon_i2c_algo; |
// ret = i2c_add_adapter(&i2c->adapter); |
// if (ret) { |
// DRM_ERROR("Failed to register hw i2c %s\n", name); |
// goto out_free; |
// } |
ret = i2c_add_adapter(&i2c->adapter); |
if (ret) { |
DRM_ERROR("Failed to register hw i2c %s\n", name); |
goto out_free; |
} |
} else if (rec->hw_capable && |
radeon_hw_i2c && |
ASIC_IS_DCE3(rdev)) { |
934,11 → 940,11 |
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
"Radeon i2c hw bus %s", name); |
i2c->adapter.algo = &radeon_atom_i2c_algo; |
// ret = i2c_add_adapter(&i2c->adapter); |
// if (ret) { |
// DRM_ERROR("Failed to register hw i2c %s\n", name); |
// goto out_free; |
// } |
ret = i2c_add_adapter(&i2c->adapter); |
if (ret) { |
DRM_ERROR("Failed to register hw i2c %s\n", name); |
goto out_free; |
} |
} else { |
/* set the radeon bit adapter */ |
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), |
1005,7 → 1011,7 |
{ |
if (!i2c) |
return; |
// i2c_del_adapter(&i2c->adapter); |
i2c_del_adapter(&i2c->adapter); |
kfree(i2c); |
} |
/drivers/video/drm/radeon/radeon_mode.h |
---|
209,7 → 209,8 |
CT_RN50_POWER, |
CT_MAC_X800, |
CT_MAC_G5_9600, |
CT_SAM440EP |
CT_SAM440EP, |
CT_MAC_G4_SILVER |
}; |
enum radeon_dvo_chip { |
425,7 → 426,7 |
uint32_t igp_lane_info; |
/* displayport */ |
struct radeon_i2c_chan *dp_i2c_bus; |
u8 dpcd[8]; |
u8 dpcd[DP_RECEIVER_CAP_SIZE]; |
u8 dp_sink_type; |
int dp_clock; |
int dp_lane_count; |
556,7 → 557,7 |
u8 val); |
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); |
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); |
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); |
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux); |
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); |
/drivers/video/drm/radeon/radeon_ring.c |
---|
385,11 → 385,9 |
if (ndw < ring->ring_free_dw) { |
break; |
} |
// r = radeon_fence_wait_next(rdev); |
// if (r) { |
// mutex_unlock(&rdev->cp.mutex); |
// return r; |
// } |
r = radeon_fence_wait_next_locked(rdev, ring->idx); |
if (r) |
return r; |
} |
ring->count_dw = ndw; |
ring->wptr_old = ring->wptr; |
461,7 → 459,7 |
* |
* @ring: radeon_ring structure holding ring information |
* |
* Reset the driver's copy of the wtpr (all asics). |
* Reset the driver's copy of the wptr (all asics). |
*/ |
void radeon_ring_undo(struct radeon_ring *ring) |
{ |
505,7 → 503,7 |
} |
/** |
* radeon_ring_force_activity - update lockup variables |
* radeon_ring_lockup_update - update lockup variables |
* |
* @ring: radeon_ring structure holding ring information |
* |
772,22 → 770,30 |
int ridx = *(int*)node->info_ent->data; |
struct radeon_ring *ring = &rdev->ring[ridx]; |
unsigned count, i, j; |
u32 tmp; |
radeon_ring_free_size(rdev, ring); |
count = (ring->ring_size / 4) - ring->ring_free_dw; |
seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg)); |
seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg)); |
tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift; |
seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp); |
tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift; |
seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp); |
if (ring->rptr_save_reg) { |
seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg, |
RREG32(ring->rptr_save_reg)); |
} |
seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr); |
seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr); |
seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr); |
seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr); |
seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr); |
seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr); |
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
seq_printf(m, "%u dwords in ring\n", count); |
i = ring->rptr; |
for (j = 0; j <= count; j++) { |
seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); |
/* print 32 dw before the current rptr, as the last executed |
* packet is often the root of the issue |
*/ |
i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; |
for (j = 0; j <= (count + 32); j++) { |
seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]); |
i = (i + 1) & ring->ptr_mask; |
} |
return 0; |
796,11 → 802,15 |
static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX; |
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX; |
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX; |
static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX; |
static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX; |
static struct drm_info_list radeon_debugfs_ring_info_list[] = { |
{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index}, |
{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index}, |
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index}, |
{"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index}, |
{"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index}, |
}; |
static int radeon_debugfs_sa_info(struct seq_file *m, void *data) |
/drivers/video/drm/radeon/rdisplay_kms.c |
---|
894,6 → 894,8 |
goto fail; |
} |
radeon_fence_unref(&ib->fence); |
fail: |
return ret; |
}; |
/drivers/video/drm/radeon/rs600.c |
---|
705,6 → 705,12 |
if (r) |
return r; |
r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
/* Enable IRQ */ |
rs600_irq_set(rdev); |
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
/drivers/video/drm/radeon/rs690.c |
---|
621,6 → 621,12 |
if (r) |
return r; |
r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
/* Enable IRQ */ |
rs600_irq_set(rdev); |
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
/drivers/video/drm/radeon/rv515.c |
---|
40,6 → 40,12 |
static void rv515_gpu_init(struct radeon_device *rdev); |
int rv515_mc_wait_for_idle(struct radeon_device *rdev); |
static const u32 crtc_offsets[2] = |
{ |
0, |
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL |
}; |
void rv515_debugfs(struct radeon_device *rdev) |
{ |
if (r100_debugfs_rbbm_init(rdev)) { |
281,30 → 287,114 |
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) |
{ |
u32 crtc_enabled, tmp, frame_count, blackout; |
int i, j; |
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); |
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); |
/* Stop all video */ |
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); |
/* disable VGA render */ |
WREG32(R_000300_VGA_RENDER_CONTROL, 0); |
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); |
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); |
WREG32(R_006080_D1CRTC_CONTROL, 0); |
WREG32(R_006880_D2CRTC_CONTROL, 0); |
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); |
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); |
WREG32(R_000330_D1VGA_CONTROL, 0); |
WREG32(R_000338_D2VGA_CONTROL, 0); |
/* blank the display controllers */ |
for (i = 0; i < rdev->num_crtc; i++) { |
crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN; |
if (crtc_enabled) { |
save->crtc_enabled[i] = true; |
tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]); |
if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) { |
radeon_wait_for_vblank(rdev, i); |
tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; |
WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp); |
} |
/* wait for the next frame */ |
frame_count = radeon_get_vblank_counter(rdev, i); |
for (j = 0; j < rdev->usec_timeout; j++) { |
if (radeon_get_vblank_counter(rdev, i) != frame_count) |
break; |
udelay(1); |
} |
} else { |
save->crtc_enabled[i] = false; |
} |
} |
radeon_mc_wait_for_idle(rdev); |
if (rdev->family >= CHIP_R600) { |
if (rdev->family >= CHIP_RV770) |
blackout = RREG32(R700_MC_CITF_CNTL); |
else |
blackout = RREG32(R600_CITF_CNTL); |
if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) { |
/* Block CPU access */ |
WREG32(R600_BIF_FB_EN, 0); |
/* blackout the MC */ |
blackout |= R600_BLACKOUT_MASK; |
if (rdev->family >= CHIP_RV770) |
WREG32(R700_MC_CITF_CNTL, blackout); |
else |
WREG32(R600_CITF_CNTL, blackout); |
} |
} |
} |
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) |
{ |
WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start); |
WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start); |
WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start); |
WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start); |
WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); |
/* Unlock host access */ |
u32 tmp, frame_count; |
int i, j; |
/* update crtc base addresses */ |
for (i = 0; i < rdev->num_crtc; i++) { |
if (rdev->family >= CHIP_RV770) { |
if (i == 1) { |
WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, |
upper_32_bits(rdev->mc.vram_start)); |
WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, |
upper_32_bits(rdev->mc.vram_start)); |
} else { |
WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, |
upper_32_bits(rdev->mc.vram_start)); |
WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, |
upper_32_bits(rdev->mc.vram_start)); |
} |
} |
WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], |
(u32)rdev->mc.vram_start); |
WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], |
(u32)rdev->mc.vram_start); |
} |
WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); |
if (rdev->family >= CHIP_R600) { |
/* unblackout the MC */ |
if (rdev->family >= CHIP_RV770) |
tmp = RREG32(R700_MC_CITF_CNTL); |
else |
tmp = RREG32(R600_CITF_CNTL); |
tmp &= ~R600_BLACKOUT_MASK; |
if (rdev->family >= CHIP_RV770) |
WREG32(R700_MC_CITF_CNTL, tmp); |
else |
WREG32(R600_CITF_CNTL, tmp); |
/* allow CPU access */ |
WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN); |
} |
for (i = 0; i < rdev->num_crtc; i++) { |
if (save->crtc_enabled[i]) { |
tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]); |
tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; |
WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp); |
/* wait for the next frame */ |
frame_count = radeon_get_vblank_counter(rdev, i); |
for (j = 0; j < rdev->usec_timeout; j++) { |
if (radeon_get_vblank_counter(rdev, i) != frame_count) |
break; |
udelay(1); |
} |
} |
} |
/* Unlock vga access */ |
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); |
mdelay(1); |
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); |
/drivers/video/drm/radeon/rv770.c |
---|
239,6 → 239,7 |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); |
WREG32(SCRATCH_UMSK, 0); |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
} |
static int rv770_cp_load_microcode(struct radeon_device *rdev) |
499,6 → 500,8 |
WREG32(GB_TILING_CONFIG, gb_tiling_config); |
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff)); |
WREG32(CGTS_SYS_TCC_DISABLE, 0); |
WREG32(CGTS_TCC_DISABLE, 0); |
802,7 → 805,7 |
static int rv770_startup(struct radeon_device *rdev) |
{ |
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
struct radeon_ring *ring; |
int r; |
/* enable pcie gen2 link */ |
849,6 → 852,18 |
if (r) |
return r; |
r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); |
return r; |
} |
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
/* Enable IRQ */ |
r = r600_irq_init(rdev); |
if (r) { |
858,11 → 873,20 |
} |
r600_irq_set(rdev); |
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
R600_CP_RB_RPTR, R600_CP_RB_WPTR, |
0, 0xfffff, RADEON_CP_PACKET2); |
if (r) |
return r; |
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
DMA_RB_RPTR, DMA_RB_WPTR, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); |
if (r) |
return r; |
r = rv770_cp_load_microcode(rdev); |
if (r) |
return r; |
870,6 → 894,10 |
if (r) |
return r; |
r = r600_dma_resume(rdev); |
if (r) |
return r; |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
949,6 → 977,9 |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; |
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
/drivers/video/drm/radeon/rv770d.h |
---|
109,6 → 109,9 |
#define PIPE_TILING__SHIFT 1 |
#define PIPE_TILING__MASK 0x0000000e |
#define DMA_TILING_CONFIG 0x3ec8 |
#define DMA_TILING_CONFIG2 0xd0b8 |
#define GC_USER_SHADER_PIPE_CONFIG 0x8954 |
#define INACTIVE_QD_PIPES(x) ((x) << 8) |
#define INACTIVE_QD_PIPES_MASK 0x0000FF00 |
358,6 → 361,26 |
#define WAIT_UNTIL 0x8040 |
/* async DMA */ |
#define DMA_RB_RPTR 0xd008 |
#define DMA_RB_WPTR 0xd00c |
/* async DMA packets */ |
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \ |
(((t) & 0x1) << 23) | \ |
(((s) & 0x1) << 22) | \ |
(((n) & 0xFFFF) << 0)) |
/* async DMA Packet types */ |
#define DMA_PACKET_WRITE 0x2 |
#define DMA_PACKET_COPY 0x3 |
#define DMA_PACKET_INDIRECT_BUFFER 0x4 |
#define DMA_PACKET_SEMAPHORE 0x5 |
#define DMA_PACKET_FENCE 0x6 |
#define DMA_PACKET_TRAP 0x7 |
#define DMA_PACKET_CONSTANT_FILL 0xd |
#define DMA_PACKET_NOP 0xf |
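With the 4-argument DMA_PACKET() builder above, the NOP header passed as the DMA ring filler in rv770_startup() works out as follows; illustrative expansion only:

/* DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)
 *   = ((0xf & 0xF) << 28) | (0 << 23) | (0 << 22) | (0 & 0xFFFF)
 *   = 0xF0000000  -- opcode in bits 31:28, count and flags zero
 */
u32 dma_nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);	/* == 0xF0000000 */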
#define SRBM_STATUS 0x0E50 |
/* DCE 3.2 HDMI */ |
551,6 → 574,54 |
#define HDMI_OFFSET0 (0x7400 - 0x7400) |
#define HDMI_OFFSET1 (0x7800 - 0x7400) |
/* DCE3.2 ELD audio interface */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */ |
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */ |
# define MAX_CHANNELS(x) (((x) & 0x7) << 0) |
/* max channels minus one. 7 = 8 channels */ |
# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8) |
# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16) |
# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */ |
/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO |
* bit0 = 32 kHz |
* bit1 = 44.1 kHz |
* bit2 = 48 kHz |
* bit3 = 88.2 kHz |
* bit4 = 96 kHz |
* bit5 = 176.4 kHz |
* bit6 = 192 kHz |
*/ |
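As an illustration of the descriptor encoding above, a hypothetical LPCM entry advertising two channels at 32/44.1/48 kHz could be built as:

/* MAX_CHANNELS() takes channels minus one; 0x07 sets frequency bits 0-2 */
u32 lpcm_desc = MAX_CHANNELS(1) | SUPPORTED_FREQUENCIES(0x07);
WREG32(AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, lpcm_desc);	/* hypothetical write */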
#define AZ_HOT_PLUG_CONTROL 0x7300 |
# define AZ_FORCE_CODEC_WAKE (1 << 0) |
# define PIN0_JACK_DETECTION_ENABLE (1 << 4) |
# define PIN1_JACK_DETECTION_ENABLE (1 << 5) |
# define PIN2_JACK_DETECTION_ENABLE (1 << 6) |
# define PIN3_JACK_DETECTION_ENABLE (1 << 7) |
# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8) |
# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9) |
# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10) |
# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11) |
# define CODEC_HOT_PLUG_ENABLE (1 << 12) |
# define PIN0_AUDIO_ENABLED (1 << 24) |
# define PIN1_AUDIO_ENABLED (1 << 25) |
# define PIN2_AUDIO_ENABLED (1 << 26) |
# define PIN3_AUDIO_ENABLED (1 << 27) |
# define AUDIO_ENABLED (1 << 31) |
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 |
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 |
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 |
/drivers/video/drm/radeon/si.c |
---|
1660,6 → 1660,8 |
WREG32(GB_ADDR_CONFIG, gb_addr_config); |
WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); |
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); |
si_tiling_mode_table_init(rdev); |
1833,9 → 1835,12 |
if (enable) |
WREG32(CP_ME_CNTL, 0); |
else { |
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); |
WREG32(SCRATCH_UMSK, 0); |
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; |
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; |
} |
udelay(50); |
} |
2121,15 → 2126,13 |
return radeon_ring_test_lockup(rdev, ring); |
} |
static int si_gpu_soft_reset(struct radeon_device *rdev) |
static void si_gpu_soft_reset_gfx(struct radeon_device *rdev) |
{ |
struct evergreen_mc_save save; |
u32 grbm_reset = 0; |
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) |
return 0; |
return; |
dev_info(rdev->dev, "GPU softreset \n"); |
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", |
RREG32(GRBM_STATUS)); |
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n", |
2140,10 → 2143,7 |
RREG32(GRBM_STATUS_SE1)); |
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", |
RREG32(SRBM_STATUS)); |
evergreen_mc_stop(rdev, &save); |
if (radeon_mc_wait_for_idle(rdev)) { |
dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
} |
/* Disable CP parsing/prefetching */ |
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); |
2168,8 → 2168,7 |
udelay(50); |
WREG32(GRBM_SOFT_RESET, 0); |
(void)RREG32(GRBM_SOFT_RESET); |
/* Wait a little for things to settle down */ |
udelay(50); |
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", |
RREG32(GRBM_STATUS)); |
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n", |
2180,6 → 2179,66 |
RREG32(GRBM_STATUS_SE1)); |
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", |
RREG32(SRBM_STATUS)); |
} |
static void si_gpu_soft_reset_dma(struct radeon_device *rdev) |
{ |
u32 tmp; |
if (RREG32(DMA_STATUS_REG) & DMA_IDLE) |
return; |
dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n", |
RREG32(DMA_STATUS_REG)); |
/* dma0 */ |
tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); |
tmp &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp); |
/* dma1 */ |
tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); |
tmp &= ~DMA_RB_ENABLE; |
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp); |
/* Reset dma */ |
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); |
RREG32(SRBM_SOFT_RESET); |
udelay(50); |
WREG32(SRBM_SOFT_RESET, 0); |
dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n", |
RREG32(DMA_STATUS_REG)); |
} |
static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) |
{ |
struct evergreen_mc_save save; |
if (reset_mask == 0) |
return 0; |
dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); |
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); |
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); |
evergreen_mc_stop(rdev, &save); |
if (radeon_mc_wait_for_idle(rdev)) { |
dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); |
} |
if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) |
si_gpu_soft_reset_gfx(rdev); |
if (reset_mask & RADEON_RESET_DMA) |
si_gpu_soft_reset_dma(rdev); |
/* Wait a little for things to settle down */ |
udelay(50); |
evergreen_mc_resume(rdev, &save); |
return 0; |
} |
2186,7 → 2245,9 |
int si_asic_reset(struct radeon_device *rdev) |
{ |
return si_gpu_soft_reset(rdev); |
return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX | |
RADEON_RESET_COMPUTE | |
RADEON_RESET_DMA)); |
} |
/* MC */ |
2426,9 → 2487,20 |
/* enable context1-15 */ |
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, |
(u32)(rdev->dummy_page.addr >> 12)); |
WREG32(VM_CONTEXT1_CNTL2, 0); |
WREG32(VM_CONTEXT1_CNTL2, 4); |
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT | |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT | |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT | |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT | |
PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT | |
PDE0_PROTECTION_FAULT_ENABLE_DEFAULT | |
VALID_PROTECTION_FAULT_ENABLE_INTERRUPT | |
VALID_PROTECTION_FAULT_ENABLE_DEFAULT | |
READ_PROTECTION_FAULT_ENABLE_INTERRUPT | |
READ_PROTECTION_FAULT_ENABLE_DEFAULT | |
WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT | |
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT); |
si_pcie_gart_tlb_flush(rdev); |
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
2534,6 → 2606,7 |
u32 idx = pkt->idx + 1; |
u32 idx_value = ib[idx]; |
u32 start_reg, end_reg, reg, i; |
u32 command, info; |
switch (pkt->opcode) { |
case PACKET3_NOP: |
2633,6 → 2706,52 |
return -EINVAL; |
} |
break; |
case PACKET3_CP_DMA: |
command = ib[idx + 4]; |
info = ib[idx + 1]; |
if (command & PACKET3_CP_DMA_CMD_SAS) { |
/* src address space is register */ |
if (((info & 0x60000000) >> 29) == 0) { |
start_reg = idx_value << 2; |
if (command & PACKET3_CP_DMA_CMD_SAIC) { |
reg = start_reg; |
if (!si_vm_reg_valid(reg)) { |
DRM_ERROR("CP DMA Bad SRC register\n"); |
return -EINVAL; |
} |
} else { |
for (i = 0; i < (command & 0x1fffff); i++) { |
reg = start_reg + (4 * i); |
if (!si_vm_reg_valid(reg)) { |
DRM_ERROR("CP DMA Bad SRC register\n"); |
return -EINVAL; |
} |
} |
} |
} |
} |
if (command & PACKET3_CP_DMA_CMD_DAS) { |
/* dst address space is register */ |
if (((info & 0x00300000) >> 20) == 0) { |
start_reg = ib[idx + 2]; |
if (command & PACKET3_CP_DMA_CMD_DAIC) { |
reg = start_reg; |
if (!si_vm_reg_valid(reg)) { |
DRM_ERROR("CP DMA Bad DST register\n"); |
return -EINVAL; |
} |
} else { |
for (i = 0; i < (command & 0x1fffff); i++) { |
reg = start_reg + (4 * i); |
if (!si_vm_reg_valid(reg)) { |
DRM_ERROR("CP DMA Bad DST register\n"); |
return -EINVAL; |
} |
} |
} |
} |
} |
break; |
default: |
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); |
return -EINVAL; |
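The new PACKET3_CP_DMA case above walks a register window whose length comes from the low 21 bits of the COMMAND dword (ib[idx + 4]); a condensed sketch of that validation, with the helper name assumed here:

/* mirrors the SRC/DST register checks: a set SAIC/DAIC bit means a single,
 * non-incrementing register, otherwise COMMAND[20:0] registers are touched */
static int cp_dma_check_reg_window(u32 command, u32 start_reg, bool no_incr)
{
	u32 count = command & 0x1fffff;
	u32 i;

	if (no_incr)
		return si_vm_reg_valid(start_reg) ? 0 : -EINVAL;
	for (i = 0; i < count; i++)
		if (!si_vm_reg_valid(start_reg + 4 * i))
			return -EINVAL;
	return 0;
}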
2809,9 → 2928,12 |
{ |
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; |
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); |
uint64_t value; |
unsigned ndw; |
if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) { |
while (count) { |
unsigned ndw = 2 + count * 2; |
ndw = 2 + count * 2; |
if (ndw > 0x3FFE) |
ndw = 0x3FFE; |
2821,14 → 2943,14 |
radeon_ring_write(ring, pe); |
radeon_ring_write(ring, upper_32_bits(pe)); |
for (; ndw > 2; ndw -= 2, --count, pe += 8) { |
uint64_t value; |
if (flags & RADEON_VM_PAGE_SYSTEM) { |
value = radeon_vm_map_gart(rdev, addr); |
value &= 0xFFFFFFFFFFFFF000ULL; |
} else if (flags & RADEON_VM_PAGE_VALID) |
} else if (flags & RADEON_VM_PAGE_VALID) { |
value = addr; |
else |
} else { |
value = 0; |
} |
addr += incr; |
value |= r600_flags; |
radeon_ring_write(ring, value); |
2835,8 → 2957,61 |
radeon_ring_write(ring, upper_32_bits(value)); |
} |
} |
} else { |
/* DMA */ |
if (flags & RADEON_VM_PAGE_SYSTEM) { |
while (count) { |
ndw = count * 2; |
if (ndw > 0xFFFFE) |
ndw = 0xFFFFE; |
/* for non-physically contiguous pages (system) */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw)); |
radeon_ring_write(ring, pe); |
radeon_ring_write(ring, upper_32_bits(pe) & 0xff); |
for (; ndw > 0; ndw -= 2, --count, pe += 8) { |
if (flags & RADEON_VM_PAGE_SYSTEM) { |
value = radeon_vm_map_gart(rdev, addr); |
value &= 0xFFFFFFFFFFFFF000ULL; |
} else if (flags & RADEON_VM_PAGE_VALID) { |
value = addr; |
} else { |
value = 0; |
} |
addr += incr; |
value |= r600_flags; |
radeon_ring_write(ring, value); |
radeon_ring_write(ring, upper_32_bits(value)); |
} |
} |
} else { |
while (count) { |
ndw = count * 2; |
if (ndw > 0xFFFFE) |
ndw = 0xFFFFE; |
if (flags & RADEON_VM_PAGE_VALID) |
value = addr; |
else |
value = 0; |
/* for physically contiguous pages (vram) */ |
radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw)); |
radeon_ring_write(ring, pe); /* dst addr */ |
radeon_ring_write(ring, upper_32_bits(pe) & 0xff); |
radeon_ring_write(ring, r600_flags); /* mask */ |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, value); /* value */ |
radeon_ring_write(ring, upper_32_bits(value)); |
radeon_ring_write(ring, incr); /* increment size */ |
radeon_ring_write(ring, 0); |
pe += ndw * 4; |
addr += (ndw / 2) * incr; |
count -= ndw / 2; |
} |
} |
} |
} |
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
2880,6 → 3055,32 |
radeon_ring_write(ring, 0x0); |
} |
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
if (vm == NULL) |
return; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); |
if (vm->id < 8) { |
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); |
} else { |
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2)); |
} |
radeon_ring_write(ring, vm->pd_gpu_addr >> 12); |
/* flush hdp cache */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2)); |
radeon_ring_write(ring, 1); |
/* bits 0-7 are the VM contexts0-7 */ |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); |
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); |
radeon_ring_write(ring, 1 << vm->id); |
} |
/* |
* RLC |
*/ |
3048,6 → 3249,10 |
WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
WREG32(CP_INT_CNTL_RING1, 0); |
WREG32(CP_INT_CNTL_RING2, 0); |
tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; |
WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp); |
tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; |
WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); |
WREG32(GRBM_INT_CNTL, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
3167,6 → 3372,7 |
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; |
u32 grbm_int_cntl = 0; |
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; |
u32 dma_cntl, dma_cntl1; |
if (!rdev->irq.installed) { |
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
3187,6 → 3393,9 |
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; |
dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; |
dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; |
/* enable CP interrupts on all rings */ |
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { |
DRM_DEBUG("si_irq_set: sw int gfx\n"); |
3200,6 → 3409,15 |
DRM_DEBUG("si_irq_set: sw int cp2\n"); |
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE; |
} |
if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { |
DRM_DEBUG("si_irq_set: sw int dma\n"); |
dma_cntl |= TRAP_ENABLE; |
} |
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) { |
DRM_DEBUG("si_irq_set: sw int dma1\n"); |
dma_cntl1 |= TRAP_ENABLE; |
} |
if (rdev->irq.crtc_vblank_int[0] || |
atomic_read(&rdev->irq.pflip[0])) { |
DRM_DEBUG("si_irq_set: vblank 0\n"); |
3259,6 → 3477,9 |
WREG32(CP_INT_CNTL_RING1, cp_int_cntl1); |
WREG32(CP_INT_CNTL_RING2, cp_int_cntl2); |
WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl); |
WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1); |
WREG32(GRBM_INT_CNTL, grbm_int_cntl); |
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); |
3684,6 → 3905,16 |
break; |
} |
break; |
case 146: |
case 147: |
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); |
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", |
RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); |
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", |
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); |
/* reset addr and status */ |
WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1); |
break; |
case 176: /* RINGID0 CP_INT */ |
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); |
break; |
3707,9 → 3938,17 |
break; |
} |
break; |
case 224: /* DMA trap event */ |
DRM_DEBUG("IH: DMA trap\n"); |
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); |
break; |
case 233: /* GUI IDLE */ |
DRM_DEBUG("IH: GUI idle\n"); |
break; |
case 244: /* DMA trap event */ |
DRM_DEBUG("IH: DMA1 trap\n"); |
radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); |
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
break; |
3733,6 → 3972,80 |
return IRQ_HANDLED; |
} |
/** |
* si_copy_dma - copy pages using the DMA engine |
* |
* @rdev: radeon_device pointer |
* @src_offset: src GPU address |
* @dst_offset: dst GPU address |
* @num_gpu_pages: number of GPU pages to xfer |
* @fence: radeon fence object |
* |
* Copy GPU paging using the DMA engine (SI). |
* Used by the radeon ttm implementation to move pages if |
* registered as the asic copy callback. |
*/ |
int si_copy_dma(struct radeon_device *rdev, |
uint64_t src_offset, uint64_t dst_offset, |
unsigned num_gpu_pages, |
struct radeon_fence **fence) |
{ |
struct radeon_semaphore *sem = NULL; |
int ring_index = rdev->asic->copy.dma_ring_index; |
struct radeon_ring *ring = &rdev->ring[ring_index]; |
u32 size_in_bytes, cur_size_in_bytes; |
int i, num_loops; |
int r = 0; |
r = radeon_semaphore_create(rdev, &sem); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
return r; |
} |
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); |
num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff); |
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); |
if (r) { |
DRM_ERROR("radeon: moving bo (%d).\n", r); |
radeon_semaphore_free(rdev, &sem, NULL); |
return r; |
} |
if (radeon_fence_need_sync(*fence, ring->idx)) { |
radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, |
ring->idx); |
radeon_fence_note_sync(*fence, ring->idx); |
} else { |
radeon_semaphore_free(rdev, &sem, NULL); |
} |
for (i = 0; i < num_loops; i++) { |
cur_size_in_bytes = size_in_bytes; |
if (cur_size_in_bytes > 0xFFFFF) |
cur_size_in_bytes = 0xFFFFF; |
size_in_bytes -= cur_size_in_bytes; |
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes)); |
radeon_ring_write(ring, dst_offset & 0xffffffff); |
radeon_ring_write(ring, src_offset & 0xffffffff); |
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff); |
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff); |
src_offset += cur_size_in_bytes; |
dst_offset += cur_size_in_bytes; |
} |
r = radeon_fence_emit(rdev, fence, ring->idx); |
if (r) { |
radeon_ring_unlock_undo(rdev, ring); |
return r; |
} |
radeon_ring_unlock_commit(rdev, ring); |
radeon_semaphore_free(rdev, &sem, *fence); |
return r; |
} |
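The ring reservation of num_loops * 5 + 11 dwords above follows from each DMA_PACKET_COPY taking 5 dwords and moving at most 0xFFFFF bytes, with the extra 11 dwords leaving room for the semaphore sync and fence emission. A quick numeric illustration, not part of the diff:

/* copying 1024 GPU pages (4 MiB):
 *   size_in_bytes = 1024 << RADEON_GPU_PAGE_SHIFT = 4194304
 *   num_loops     = DIV_ROUND_UP(4194304, 0xfffff) = 5
 *   dwords locked = 5 * 5 + 11 = 36
 */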
/* |
* startup/shutdown callbacks |
*/ |
3804,6 → 4117,18 |
return r; |
} |
r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX); |
if (r) { |
dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); |
return r; |
} |
/* Enable IRQ */ |
r = si_irq_init(rdev); |
if (r) { |
3834,6 → 4159,22 |
if (r) |
return r; |
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, |
DMA_RB_RPTR + DMA0_REGISTER_OFFSET, |
DMA_RB_WPTR + DMA0_REGISTER_OFFSET, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); |
if (r) |
return r; |
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET, |
DMA_RB_RPTR + DMA1_REGISTER_OFFSET, |
DMA_RB_WPTR + DMA1_REGISTER_OFFSET, |
2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); |
if (r) |
return r; |
r = si_cp_load_microcode(rdev); |
if (r) |
return r; |
3841,6 → 4182,10 |
if (r) |
return r; |
r = cayman_dma_resume(rdev); |
if (r) |
return r; |
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
3932,6 → 4277,14 |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 1024 * 1024); |
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 64 * 1024); |
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 64 * 1024); |
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
/drivers/video/drm/radeon/sid.h |
---|
62,6 → 62,22 |
#define SRBM_STATUS 0xE50 |
#define SRBM_SOFT_RESET 0x0E60 |
#define SOFT_RESET_BIF (1 << 1) |
#define SOFT_RESET_DC (1 << 5) |
#define SOFT_RESET_DMA1 (1 << 6) |
#define SOFT_RESET_GRBM (1 << 8) |
#define SOFT_RESET_HDP (1 << 9) |
#define SOFT_RESET_IH (1 << 10) |
#define SOFT_RESET_MC (1 << 11) |
#define SOFT_RESET_ROM (1 << 14) |
#define SOFT_RESET_SEM (1 << 15) |
#define SOFT_RESET_VMC (1 << 17) |
#define SOFT_RESET_DMA (1 << 20) |
#define SOFT_RESET_TST (1 << 21) |
#define SOFT_RESET_REGBB (1 << 22) |
#define SOFT_RESET_ORB (1 << 23) |
#define CC_SYS_RB_BACKEND_DISABLE 0xe80 |
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 |
91,7 → 107,18 |
#define VM_CONTEXT0_CNTL 0x1410 |
#define ENABLE_CONTEXT (1 << 0) |
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) |
#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3) |
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) |
#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6) |
#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7) |
#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9) |
#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10) |
#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12) |
#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13) |
#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15) |
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16) |
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18) |
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19) |
#define VM_CONTEXT1_CNTL 0x1414 |
#define VM_CONTEXT0_CNTL2 0x1430 |
#define VM_CONTEXT1_CNTL2 0x1434 |
104,6 → 131,9 |
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450 |
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454 |
#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC |
#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC |
#define VM_INVALIDATE_REQUEST 0x1478 |
#define VM_INVALIDATE_RESPONSE 0x147c |
835,6 → 865,54 |
#define PACKET3_WAIT_REG_MEM 0x3C |
#define PACKET3_MEM_WRITE 0x3D |
#define PACKET3_COPY_DATA 0x40 |
#define PACKET3_CP_DMA 0x41 |
/* 1. header |
* 2. SRC_ADDR_LO or DATA [31:0] |
* 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] | |
* SRC_ADDR_HI [7:0] |
* 4. DST_ADDR_LO [31:0] |
* 5. DST_ADDR_HI [7:0] |
* 6. COMMAND [30:21] | BYTE_COUNT [20:0] |
*/ |
# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20) |
/* 0 - SRC_ADDR |
* 1 - GDS |
*/ |
# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27) |
/* 0 - ME |
* 1 - PFP |
*/ |
# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29) |
/* 0 - SRC_ADDR |
* 1 - GDS |
* 2 - DATA |
*/ |
# define PACKET3_CP_DMA_CP_SYNC (1 << 31) |
/* COMMAND */ |
# define PACKET3_CP_DMA_DIS_WC (1 << 21) |
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23) |
/* 0 - none |
* 1 - 8 in 16 |
* 2 - 8 in 32 |
* 3 - 8 in 64 |
*/ |
# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24) |
/* 0 - none |
* 1 - 8 in 16 |
* 2 - 8 in 32 |
* 3 - 8 in 64 |
*/ |
# define PACKET3_CP_DMA_CMD_SAS (1 << 26) |
/* 0 - memory |
* 1 - register |
*/ |
# define PACKET3_CP_DMA_CMD_DAS (1 << 27) |
/* 0 - memory |
* 1 - register |
*/ |
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28) |
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29) |
# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30) |
#define PACKET3_PFP_SYNC_ME 0x42 |
#define PACKET3_SURFACE_SYNC 0x43 |
# define PACKET3_DEST_BASE_0_ENA (1 << 0) |
922,4 → 1000,63 |
#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A |
#define PACKET3_SWITCH_BUFFER 0x8B |
/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */ |
#define DMA0_REGISTER_OFFSET 0x0 /* not a register */ |
#define DMA1_REGISTER_OFFSET 0x800 /* not a register */ |
#define DMA_RB_CNTL 0xd000 |
# define DMA_RB_ENABLE (1 << 0) |
# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */ |
# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */ |
# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12) |
# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */ |
# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */ |
#define DMA_RB_BASE 0xd004 |
#define DMA_RB_RPTR 0xd008 |
#define DMA_RB_WPTR 0xd00c |
#define DMA_RB_RPTR_ADDR_HI 0xd01c |
#define DMA_RB_RPTR_ADDR_LO 0xd020 |
#define DMA_IB_CNTL 0xd024 |
# define DMA_IB_ENABLE (1 << 0) |
# define DMA_IB_SWAP_ENABLE (1 << 4) |
#define DMA_IB_RPTR 0xd028 |
#define DMA_CNTL 0xd02c |
# define TRAP_ENABLE (1 << 0) |
# define SEM_INCOMPLETE_INT_ENABLE (1 << 1) |
# define SEM_WAIT_INT_ENABLE (1 << 2) |
# define DATA_SWAP_ENABLE (1 << 3) |
# define FENCE_SWAP_ENABLE (1 << 4) |
# define CTXEMPTY_INT_ENABLE (1 << 28) |
#define DMA_STATUS_REG 0xd034 |
# define DMA_IDLE (1 << 0) |
#define DMA_TILING_CONFIG 0xd0b8 |
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \ |
(((b) & 0x1) << 26) | \ |
(((t) & 0x1) << 23) | \ |
(((s) & 0x1) << 22) | \ |
(((n) & 0xFFFFF) << 0)) |
#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \ |
(((vmid) & 0xF) << 20) | \ |
(((n) & 0xFFFFF) << 0)) |
#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \ |
(1 << 26) | \ |
(1 << 21) | \ |
(((n) & 0xFFFFF) << 0)) |
/* async DMA Packet types */ |
#define DMA_PACKET_WRITE 0x2 |
#define DMA_PACKET_COPY 0x3 |
#define DMA_PACKET_INDIRECT_BUFFER 0x4 |
#define DMA_PACKET_SEMAPHORE 0x5 |
#define DMA_PACKET_FENCE 0x6 |
#define DMA_PACKET_TRAP 0x7 |
#define DMA_PACKET_SRBM_WRITE 0x9 |
#define DMA_PACKET_CONSTANT_FILL 0xd |
#define DMA_PACKET_NOP 0xf |
#endif |
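For reference, the SI packet builders above expand as follows in the paths that use them (si_vm_set_page() and the DMA ring setup); illustrative arithmetic only:

/* DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)     = 0xF0000000        (ring filler nop)
 * DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw) = 0x20000000 | ndw  (system-page PTE writes)
 * DMA_PTE_PDE_PACKET(ndw) = (2 << 28) | (1 << 26) | (1 << 21) | ndw
 *                         = 0x24200000 | ndw                     (contiguous vram PTE fill)
 */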