124,7 → 124,7 |
radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); |
radeon_ring_write(ring, PACKET0(0x20C8, 0)); |
radeon_ring_write(ring, 0); |
radeon_ring_unlock_commit(rdev, ring); |
radeon_ring_unlock_commit(rdev, ring, false); |
} |
|
int rv515_mc_wait_for_idle(struct radeon_device *rdev) |
209,19 → 209,27 |
|
/**
 * rv515_mc_rreg - read an indirect MC (memory controller) register
 * @rdev: radeon device
 * @reg:  MC register offset (low 16 bits are used)
 *
 * The MC registers are reached through an index/data pair, so the
 * whole sequence is serialized with mc_idx_lock.  The index register
 * is cleared again before dropping the lock.
 *
 * Returns the value read from MC_IND_DATA.
 */
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t data;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	/* select the register; 0x7f0000 marks a read access */
	WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
	data = RREG32(MC_IND_DATA);
	WREG32(MC_IND_INDEX, 0);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);

	return data;
}
|
/**
 * rv515_mc_wreg - write an indirect MC (memory controller) register
 * @rdev: radeon device
 * @reg:  MC register offset (low 16 bits are used)
 * @v:    value to write
 *
 * Mirrors rv515_mc_rreg(): the index/data pair is protected by
 * mc_idx_lock and the index register is reset before unlocking.
 */
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	/* select the register; 0xff0000 marks a write access */
	WREG32(MC_IND_INDEX, 0xff0000 | (reg & 0xffff));
	WREG32(MC_IND_DATA, v);
	WREG32(MC_IND_INDEX, 0);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
|
#if defined(CONFIG_DEBUG_FS) |
398,8 → 406,9 |
for (i = 0; i < rdev->num_crtc; i++) { |
if (save->crtc_enabled[i]) { |
tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); |
if ((tmp & 0x3) != 0) { |
tmp &= ~0x3; |
if ((tmp & 0x7) != 3) { |
tmp &= ~0x7; |
tmp |= 0x3; |
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); |
} |
tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); |
622,6 → 631,9 |
return r; |
rv515_set_safe_registers(rdev); |
|
/* Initialize power management */ |
radeon_pm_init(rdev); |
|
rdev->accel_working = true; |
r = rv515_startup(rdev); |
if (r) { |
877,12 → 889,15 |
|
static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, |
struct radeon_crtc *crtc, |
struct rv515_watermark *wm) |
struct rv515_watermark *wm, |
bool low) |
{ |
struct drm_display_mode *mode = &crtc->base.mode; |
fixed20_12 a, b, c; |
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; |
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; |
fixed20_12 sclk; |
u32 selected_sclk; |
|
if (!crtc->base.enabled) { |
/* FIXME: wouldn't it better to set priority mark to maximum */ |
890,6 → 905,18 |
return; |
} |
|
/* rv6xx, rv7xx */ |
if ((rdev->family >= CHIP_RV610) && |
(rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) |
selected_sclk = radeon_dpm_get_sclk(rdev, low); |
else |
selected_sclk = rdev->pm.current_sclk; |
|
/* sclk in Mhz */ |
a.full = dfixed_const(100); |
sclk.full = dfixed_const(selected_sclk); |
sclk.full = dfixed_div(sclk, a); |
|
if (crtc->vsc.full > dfixed_const(2)) |
wm->num_line_pair.full = dfixed_const(2); |
else |
955,7 → 982,7 |
* sclk = system clock(Mhz) |
*/ |
a.full = dfixed_const(600 * 1000); |
chunk_time.full = dfixed_div(a, rdev->pm.sclk); |
chunk_time.full = dfixed_div(a, sclk); |
read_delay_latency.full = dfixed_const(1000); |
|
/* Determine the worst case latency |
1016,152 → 1043,169 |
} |
} |
|
void rv515_bandwidth_avivo_update(struct radeon_device *rdev) |
static void rv515_compute_mode_priority(struct radeon_device *rdev, |
struct rv515_watermark *wm0, |
struct rv515_watermark *wm1, |
struct drm_display_mode *mode0, |
struct drm_display_mode *mode1, |
u32 *d1mode_priority_a_cnt, |
u32 *d2mode_priority_a_cnt) |
{ |
struct drm_display_mode *mode0 = NULL; |
struct drm_display_mode *mode1 = NULL; |
struct rv515_watermark wm0; |
struct rv515_watermark wm1; |
u32 tmp; |
u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF; |
u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF; |
fixed20_12 priority_mark02, priority_mark12, fill_rate; |
fixed20_12 a, b; |
|
if (rdev->mode_info.crtcs[0]->base.enabled) |
mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
if (rdev->mode_info.crtcs[1]->base.enabled) |
mode1 = &rdev->mode_info.crtcs[1]->base.mode; |
rs690_line_buffer_adjust(rdev, mode0, mode1); |
*d1mode_priority_a_cnt = MODE_PRIORITY_OFF; |
*d2mode_priority_a_cnt = MODE_PRIORITY_OFF; |
|
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); |
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); |
|
tmp = wm0.lb_request_fifo_depth; |
tmp |= wm1.lb_request_fifo_depth << 16; |
WREG32(LB_MAX_REQ_OUTSTANDING, tmp); |
|
if (mode0 && mode1) { |
if (dfixed_trunc(wm0.dbpp) > 64) |
a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); |
if (dfixed_trunc(wm0->dbpp) > 64) |
a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair); |
else |
a.full = wm0.num_line_pair.full; |
if (dfixed_trunc(wm1.dbpp) > 64) |
b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); |
a.full = wm0->num_line_pair.full; |
if (dfixed_trunc(wm1->dbpp) > 64) |
b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair); |
else |
b.full = wm1.num_line_pair.full; |
b.full = wm1->num_line_pair.full; |
a.full += b.full; |
fill_rate.full = dfixed_div(wm0.sclk, a); |
if (wm0.consumption_rate.full > fill_rate.full) { |
b.full = wm0.consumption_rate.full - fill_rate.full; |
b.full = dfixed_mul(b, wm0.active_time); |
fill_rate.full = dfixed_div(wm0->sclk, a); |
if (wm0->consumption_rate.full > fill_rate.full) { |
b.full = wm0->consumption_rate.full - fill_rate.full; |
b.full = dfixed_mul(b, wm0->active_time); |
a.full = dfixed_const(16); |
b.full = dfixed_div(b, a); |
a.full = dfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
a.full = dfixed_mul(wm0->worst_case_latency, |
wm0->consumption_rate); |
priority_mark02.full = a.full + b.full; |
} else { |
a.full = dfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
a.full = dfixed_mul(wm0->worst_case_latency, |
wm0->consumption_rate); |
b.full = dfixed_const(16 * 1000); |
priority_mark02.full = dfixed_div(a, b); |
} |
if (wm1.consumption_rate.full > fill_rate.full) { |
b.full = wm1.consumption_rate.full - fill_rate.full; |
b.full = dfixed_mul(b, wm1.active_time); |
if (wm1->consumption_rate.full > fill_rate.full) { |
b.full = wm1->consumption_rate.full - fill_rate.full; |
b.full = dfixed_mul(b, wm1->active_time); |
a.full = dfixed_const(16); |
b.full = dfixed_div(b, a); |
a.full = dfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
a.full = dfixed_mul(wm1->worst_case_latency, |
wm1->consumption_rate); |
priority_mark12.full = a.full + b.full; |
} else { |
a.full = dfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
a.full = dfixed_mul(wm1->worst_case_latency, |
wm1->consumption_rate); |
b.full = dfixed_const(16 * 1000); |
priority_mark12.full = dfixed_div(a, b); |
} |
if (wm0.priority_mark.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark.full; |
if (dfixed_trunc(priority_mark02) < 0) |
priority_mark02.full = 0; |
if (wm0.priority_mark_max.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark_max.full; |
if (wm1.priority_mark.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark.full; |
if (dfixed_trunc(priority_mark12) < 0) |
priority_mark12.full = 0; |
if (wm1.priority_mark_max.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark_max.full; |
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
if (wm0->priority_mark.full > priority_mark02.full) |
priority_mark02.full = wm0->priority_mark.full; |
if (wm0->priority_mark_max.full > priority_mark02.full) |
priority_mark02.full = wm0->priority_mark_max.full; |
if (wm1->priority_mark.full > priority_mark12.full) |
priority_mark12.full = wm1->priority_mark.full; |
if (wm1->priority_mark_max.full > priority_mark12.full) |
priority_mark12.full = wm1->priority_mark_max.full; |
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
if (rdev->disp_priority == 2) { |
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
} |
} else if (mode0) { |
if (dfixed_trunc(wm0.dbpp) > 64) |
a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); |
if (dfixed_trunc(wm0->dbpp) > 64) |
a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair); |
else |
a.full = wm0.num_line_pair.full; |
fill_rate.full = dfixed_div(wm0.sclk, a); |
if (wm0.consumption_rate.full > fill_rate.full) { |
b.full = wm0.consumption_rate.full - fill_rate.full; |
b.full = dfixed_mul(b, wm0.active_time); |
a.full = wm0->num_line_pair.full; |
fill_rate.full = dfixed_div(wm0->sclk, a); |
if (wm0->consumption_rate.full > fill_rate.full) { |
b.full = wm0->consumption_rate.full - fill_rate.full; |
b.full = dfixed_mul(b, wm0->active_time); |
a.full = dfixed_const(16); |
b.full = dfixed_div(b, a); |
a.full = dfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
a.full = dfixed_mul(wm0->worst_case_latency, |
wm0->consumption_rate); |
priority_mark02.full = a.full + b.full; |
} else { |
a.full = dfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
a.full = dfixed_mul(wm0->worst_case_latency, |
wm0->consumption_rate); |
b.full = dfixed_const(16); |
priority_mark02.full = dfixed_div(a, b); |
} |
if (wm0.priority_mark.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark.full; |
if (dfixed_trunc(priority_mark02) < 0) |
priority_mark02.full = 0; |
if (wm0.priority_mark_max.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark_max.full; |
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
if (wm0->priority_mark.full > priority_mark02.full) |
priority_mark02.full = wm0->priority_mark.full; |
if (wm0->priority_mark_max.full > priority_mark02.full) |
priority_mark02.full = wm0->priority_mark_max.full; |
*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); |
if (rdev->disp_priority == 2) |
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
*d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
} else if (mode1) { |
if (dfixed_trunc(wm1.dbpp) > 64) |
a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); |
if (dfixed_trunc(wm1->dbpp) > 64) |
a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair); |
else |
a.full = wm1.num_line_pair.full; |
fill_rate.full = dfixed_div(wm1.sclk, a); |
if (wm1.consumption_rate.full > fill_rate.full) { |
b.full = wm1.consumption_rate.full - fill_rate.full; |
b.full = dfixed_mul(b, wm1.active_time); |
a.full = wm1->num_line_pair.full; |
fill_rate.full = dfixed_div(wm1->sclk, a); |
if (wm1->consumption_rate.full > fill_rate.full) { |
b.full = wm1->consumption_rate.full - fill_rate.full; |
b.full = dfixed_mul(b, wm1->active_time); |
a.full = dfixed_const(16); |
b.full = dfixed_div(b, a); |
a.full = dfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
a.full = dfixed_mul(wm1->worst_case_latency, |
wm1->consumption_rate); |
priority_mark12.full = a.full + b.full; |
} else { |
a.full = dfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
a.full = dfixed_mul(wm1->worst_case_latency, |
wm1->consumption_rate); |
b.full = dfixed_const(16 * 1000); |
priority_mark12.full = dfixed_div(a, b); |
} |
if (wm1.priority_mark.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark.full; |
if (dfixed_trunc(priority_mark12) < 0) |
priority_mark12.full = 0; |
if (wm1.priority_mark_max.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark_max.full; |
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
if (wm1->priority_mark.full > priority_mark12.full) |
priority_mark12.full = wm1->priority_mark.full; |
if (wm1->priority_mark_max.full > priority_mark12.full) |
priority_mark12.full = wm1->priority_mark_max.full; |
*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); |
if (rdev->disp_priority == 2) |
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
*d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; |
} |
} |
|
void rv515_bandwidth_avivo_update(struct radeon_device *rdev) |
{ |
struct drm_display_mode *mode0 = NULL; |
struct drm_display_mode *mode1 = NULL; |
struct rv515_watermark wm0_high, wm0_low; |
struct rv515_watermark wm1_high, wm1_low; |
u32 tmp; |
u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; |
u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; |
|
if (rdev->mode_info.crtcs[0]->base.enabled) |
mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
if (rdev->mode_info.crtcs[1]->base.enabled) |
mode1 = &rdev->mode_info.crtcs[1]->base.mode; |
rs690_line_buffer_adjust(rdev, mode0, mode1); |
|
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false); |
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false); |
|
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, false); |
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, false); |
|
tmp = wm0_high.lb_request_fifo_depth; |
tmp |= wm1_high.lb_request_fifo_depth << 16; |
WREG32(LB_MAX_REQ_OUTSTANDING, tmp); |
|
rv515_compute_mode_priority(rdev, |
&wm0_high, &wm1_high, |
mode0, mode1, |
&d1mode_priority_a_cnt, &d2mode_priority_a_cnt); |
rv515_compute_mode_priority(rdev, |
&wm0_low, &wm1_low, |
mode0, mode1, |
&d1mode_priority_b_cnt, &d2mode_priority_b_cnt); |
|
WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); |
WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); |
WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt); |
WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); |
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); |
WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt); |
} |
|
void rv515_bandwidth_update(struct radeon_device *rdev) |