27,6 → 27,7 |
#include <drm/drmP.h> |
#include "radeon.h" |
#include "radeon_asic.h" |
#include "radeon_audio.h" |
#include <drm/radeon_drm.h> |
#include "sid.h" |
#include "atom.h" |
1263,6 → 1264,36 |
} |
} |
|
/** |
* si_get_allowed_info_register - fetch the register for the info ioctl |
* |
* @rdev: radeon_device pointer |
* @reg: register offset in bytes |
* @val: register value |
* |
* Returns 0 for success or -EINVAL for an invalid register |
 */ |
int si_get_allowed_info_register(struct radeon_device *rdev, |
u32 reg, u32 *val) |
{ |
switch (reg) { |
case GRBM_STATUS: |
case GRBM_STATUS2: |
case GRBM_STATUS_SE0: |
case GRBM_STATUS_SE1: |
case SRBM_STATUS: |
case SRBM_STATUS2: |
case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET): |
case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET): |
case UVD_STATUS: |
*val = RREG32(reg); |
return 0; |
default: |
return -EINVAL; |
} |
} |
|
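/* For illustration only (not part of this patch): the expected |
 * consumer is the RADEON_INFO_READ_REG query, dispatched through the |
 * asic table.  A rough sketch of that ioctl path, assuming the usual |
 * radeon_get_allowed_info_register() wrapper: |
 * |
 *	case RADEON_INFO_READ_REG: |
 *		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) |
 *			return -EFAULT; |
 *		r = radeon_get_allowed_info_register(rdev, *value, value); |
 *		if (r) |
 *			return r; |
 *		break; |
 */ |
 |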
#define PCIE_BUS_CLK 10000 |
#define TCLK (PCIE_BUS_CLK / 10) |
|
2345,6 → 2376,9 |
c.full = dfixed_div(c, a); |
priority_b_mark = dfixed_trunc(c); |
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; |
|
/* Save number of lines the linebuffer leads before the scanout */ |
radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); |
} |
|
/* select wm A */ |
3161,6 → 3195,8 |
} |
|
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); |
WREG32(SRBM_INT_CNTL, 1); |
WREG32(SRBM_INT_ACK, 1); |
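	/* enable the SRBM read-error interrupt (serviced as src_id 96 in |
	 * si_irq_process() below) and ack any instance left asserted from |
	 * before the driver took over |
	 */ |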
|
evergreen_fix_pci_max_read_req_size(rdev); |
|
4285,7 → 4321,7 |
/* empty context1-15 */ |
/* set vm size, must be a multiple of 4 */ |
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); |
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); |
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); |
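	/* the END_ADDR register holds the last page frame covered by the |
	 * page table, i.e. the range is inclusive, hence max_pfn - 1 |
	 */ |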
	/* Assign the pt base to something valid for now; the pts used for |
	 * the VMs are determined by the application, and are set up and |
	 * assigned on the fly in the vm part of radeon_gart.c |
4360,7 → 4396,7 |
{ |
si_pcie_gart_disable(rdev); |
radeon_gart_table_vram_free(rdev); |
// radeon_gart_fini(rdev); |
radeon_gart_fini(rdev); |
} |
|
/* vm parser */ |
4698,12 → 4734,6 |
switch (pkt.type) { |
case RADEON_PACKET_TYPE0: |
dev_err(rdev->dev, "Packet0 not allowed!\n"); |
for (i = 0; i < ib->length_dw; i++) { |
if (i == idx) |
printk("\t0x%08x <---\n", ib->ptr[i]); |
else |
printk("\t0x%08x\n", ib->ptr[i]); |
} |
ret = -EINVAL; |
break; |
case RADEON_PACKET_TYPE2: |
4735,8 → 4765,15 |
ret = -EINVAL; |
break; |
} |
if (ret) |
if (ret) { |
for (i = 0; i < ib->length_dw; i++) { |
if (i == idx) |
printk("\t0x%08x <---\n", ib->ptr[i]); |
else |
printk("\t0x%08x\n", ib->ptr[i]); |
} |
break; |
} |
} while (idx < ib->length_dw); |
|
return ret; |
5057,6 → 5094,16 |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 1 << vm_id); |
|
/* wait for the invalidate to complete */ |
radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); |
radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ |
WAIT_REG_MEM_ENGINE(0))); /* me */ |
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); |
radeon_ring_write(ring, 0); |
radeon_ring_write(ring, 0); /* ref */ |
radeon_ring_write(ring, 0); /* mask */ |
radeon_ring_write(ring, 0x20); /* poll interval */ |
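	/* WAIT_REG_MEM body, for reference: dword 1 selects a register (not |
	 * memory) poll on the ME with compare function 0 (always true), |
	 * dword 2 is the register offset in dwords, then ref, mask and poll |
	 * interval.  Reading VM_INVALIDATE_REQUEST back through the CP |
	 * orders the flush against the commands that follow on the ring. |
	 */ |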
|
/* sync PFP to ME, otherwise we might get invalid PFP reads */ |
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
radeon_ring_write(ring, 0x0); |
5899,6 → 5946,7 |
tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; |
WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); |
WREG32(GRBM_INT_CNTL, 0); |
WREG32(SRBM_INT_CNTL, 0); |
if (rdev->num_crtc >= 2) { |
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); |
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); |
6040,12 → 6088,12 |
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); |
|
if (!ASIC_IS_NODCE(rdev)) { |
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; |
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); |
} |
|
dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; |
6108,27 → 6156,27 |
} |
if (rdev->irq.hpd[0]) { |
DRM_DEBUG("si_irq_set: hpd 1\n"); |
hpd1 |= DC_HPDx_INT_EN; |
hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[1]) { |
DRM_DEBUG("si_irq_set: hpd 2\n"); |
hpd2 |= DC_HPDx_INT_EN; |
hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[2]) { |
DRM_DEBUG("si_irq_set: hpd 3\n"); |
hpd3 |= DC_HPDx_INT_EN; |
hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[3]) { |
DRM_DEBUG("si_irq_set: hpd 4\n"); |
hpd4 |= DC_HPDx_INT_EN; |
hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[4]) { |
DRM_DEBUG("si_irq_set: hpd 5\n"); |
hpd5 |= DC_HPDx_INT_EN; |
hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
if (rdev->irq.hpd[5]) { |
DRM_DEBUG("si_irq_set: hpd 6\n"); |
hpd6 |= DC_HPDx_INT_EN; |
hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN; |
} |
|
WREG32(CP_INT_CNTL_RING0, cp_int_cntl); |
6188,6 → 6236,9 |
|
WREG32(CG_THERMAL_INT, thermal_int); |
|
/* posting read */ |
RREG32(SRBM_STATUS); |
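	/* the read flushes the posted interrupt-enable writes above out to |
	 * the hardware before this function returns |
	 */ |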
|
return 0; |
} |
|
6288,7 → 6339,38 |
tmp |= DC_HPDx_INT_ACK; |
WREG32(DC_HPD6_INT_CONTROL, tmp); |
} |
|
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD1_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD1_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD2_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD2_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD3_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD3_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD4_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD4_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { |
tmp = RREG32(DC_HPD5_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD5_INT_CONTROL, tmp); |
} |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { |
		tmp = RREG32(DC_HPD6_INT_CONTROL); |
tmp |= DC_HPDx_RX_INT_ACK; |
WREG32(DC_HPD6_INT_CONTROL, tmp); |
} |
} |
|
static void si_irq_disable(struct radeon_device *rdev) |
{ |
6353,6 → 6435,7 |
u32 src_id, src_data, ring_id; |
u32 ring_index; |
bool queue_hotplug = false; |
bool queue_dp = false; |
bool queue_thermal = false; |
u32 status, addr; |
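	/* queue_dp mirrors queue_hotplug for the new HPDx_RX (DP short- |
	 * pulse) sources handled below; it is presumably drained after the |
	 * event loop, e.g. via schedule_work(&rdev->dp_work), in a part of |
	 * this function outside the hunk. |
	 */ |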
|
6386,23 → 6469,27 |
case 1: /* D1 vblank/vline */ |
switch (src_data) { |
case 0: /* D1 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
if (rdev->irq.crtc_vblank_int[0]) { |
// drm_handle_vblank(rdev->ddev, 0); |
drm_handle_vblank(rdev->ddev, 0); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[0])) |
// radeon_crtc_handle_flip(rdev, 0); |
if (atomic_read(&rdev->irq.pflip[0])) |
radeon_crtc_handle_vblank(rdev, 0); |
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D1 vblank\n"); |
} |
|
break; |
case 1: /* D1 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D1 vline\n"); |
} |
|
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6412,23 → 6499,27 |
case 2: /* D2 vblank/vline */ |
switch (src_data) { |
case 0: /* D2 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
if (rdev->irq.crtc_vblank_int[1]) { |
// drm_handle_vblank(rdev->ddev, 1); |
drm_handle_vblank(rdev->ddev, 1); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[1])) |
// radeon_crtc_handle_flip(rdev, 1); |
if (atomic_read(&rdev->irq.pflip[1])) |
radeon_crtc_handle_vblank(rdev, 1); |
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D2 vblank\n"); |
} |
|
break; |
case 1: /* D2 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D2 vline\n"); |
} |
|
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6438,23 → 6529,27 |
case 3: /* D3 vblank/vline */ |
switch (src_data) { |
case 0: /* D3 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
if (rdev->irq.crtc_vblank_int[2]) { |
// drm_handle_vblank(rdev->ddev, 2); |
drm_handle_vblank(rdev->ddev, 2); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[2])) |
// radeon_crtc_handle_flip(rdev, 2); |
if (atomic_read(&rdev->irq.pflip[2])) |
radeon_crtc_handle_vblank(rdev, 2); |
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D3 vblank\n"); |
} |
|
break; |
case 1: /* D3 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D3 vline\n"); |
} |
|
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6464,23 → 6559,27 |
case 4: /* D4 vblank/vline */ |
switch (src_data) { |
case 0: /* D4 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
if (rdev->irq.crtc_vblank_int[3]) { |
// drm_handle_vblank(rdev->ddev, 3); |
drm_handle_vblank(rdev->ddev, 3); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[3])) |
// radeon_crtc_handle_flip(rdev, 3); |
if (atomic_read(&rdev->irq.pflip[3])) |
radeon_crtc_handle_vblank(rdev, 3); |
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D4 vblank\n"); |
} |
|
break; |
case 1: /* D4 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D4 vline\n"); |
} |
|
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6490,23 → 6589,27 |
case 5: /* D5 vblank/vline */ |
switch (src_data) { |
case 0: /* D5 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
if (rdev->irq.crtc_vblank_int[4]) { |
// drm_handle_vblank(rdev->ddev, 4); |
drm_handle_vblank(rdev->ddev, 4); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[4])) |
// radeon_crtc_handle_flip(rdev, 4); |
if (atomic_read(&rdev->irq.pflip[4])) |
radeon_crtc_handle_vblank(rdev, 4); |
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D5 vblank\n"); |
} |
|
break; |
case 1: /* D5 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D5 vline\n"); |
} |
|
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6516,23 → 6619,27 |
case 6: /* D6 vblank/vline */ |
switch (src_data) { |
case 0: /* D6 vblank */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
if (rdev->irq.crtc_vblank_int[5]) { |
// drm_handle_vblank(rdev->ddev, 5); |
drm_handle_vblank(rdev->ddev, 5); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (atomic_read(&rdev->irq.pflip[5])) |
// radeon_crtc_handle_flip(rdev, 5); |
if (atomic_read(&rdev->irq.pflip[5])) |
radeon_crtc_handle_vblank(rdev, 5); |
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; |
DRM_DEBUG("IH: D6 vblank\n"); |
} |
|
break; |
case 1: /* D6 vline */ |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; |
DRM_DEBUG("IH: D6 vline\n"); |
} |
|
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
6550,52 → 6657,122 |
case 42: /* HPD hotplug */ |
switch (src_data) { |
case 0: |
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD1\n"); |
} |
|
break; |
case 1: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD2\n"); |
} |
|
break; |
case 2: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD3\n"); |
} |
|
break; |
case 3: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD4\n"); |
} |
|
break; |
case 4: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD5\n"); |
} |
|
break; |
case 5: |
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; |
queue_hotplug = true; |
DRM_DEBUG("IH: HPD6\n"); |
} |
|
break; |
case 6: |
if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 1\n"); |
|
break; |
case 7: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 2\n"); |
|
break; |
case 8: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 3\n"); |
|
break; |
case 9: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 4\n"); |
|
break; |
case 10: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 5\n"); |
|
break; |
case 11: |
if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT)) |
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); |
|
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; |
queue_dp = true; |
DRM_DEBUG("IH: HPD_RX 6\n"); |
|
break; |
default: |
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); |
break; |
} |
break; |
case 96: |
DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); |
WREG32(SRBM_INT_ACK, 0x1); |
break; |
case 124: /* UVD */ |
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); |
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); |
6775,6 → 6952,22 |
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; |
} |
|
r = radeon_vce_resume(rdev); |
if (!r) { |
r = vce_v1_0_resume(rdev); |
if (!r) |
r = radeon_fence_driver_start_ring(rdev, |
TN_RING_TYPE_VCE1_INDEX); |
if (!r) |
r = radeon_fence_driver_start_ring(rdev, |
TN_RING_TYPE_VCE2_INDEX); |
} |
if (r) { |
dev_err(rdev->dev, "VCE init error (%d).\n", r); |
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0; |
rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0; |
} |
|
/* Enable IRQ */ |
if (!rdev->irq.installed) { |
r = radeon_irq_kms_init(rdev); |
6843,6 → 7036,23 |
} |
} |
|
r = -ENOENT; |
|
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; |
if (ring->ring_size) |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
VCE_CMD_NO_OP); |
|
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; |
if (ring->ring_size) |
r = radeon_ring_init(rdev, ring, ring->ring_size, 0, |
VCE_CMD_NO_OP); |
|
if (!r) |
r = vce_v1_0_init(rdev); |
else if (r != -ENOENT) |
DRM_ERROR("radeon: failed initializing VCE (%d).\n", r); |
|
r = radeon_ib_pool_init(rdev); |
if (r) { |
dev_err(rdev->dev, "IB initialization failed (%d).\n", r); |
6855,7 → 7065,7 |
return r; |
} |
|
r = dce6_audio_init(rdev); |
r = radeon_audio_init(rdev); |
if (r) |
return r; |
|
6862,9 → 7072,35 |
return 0; |
} |
|
int si_resume(struct radeon_device *rdev) |
{ |
int r; |
|
	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw, |
	 * posting performs the tasks necessary to bring the GPU back into |
	 * good shape. |
	 */ |
/* post card */ |
atom_asic_init(rdev->mode_info.atom_context); |
|
/* init golden registers */ |
si_init_golden_registers(rdev); |
|
if (rdev->pm.pm_method == PM_METHOD_DPM) |
radeon_pm_resume(rdev); |
|
rdev->accel_working = true; |
r = si_startup(rdev); |
if (r) { |
DRM_ERROR("si startup failed on resume\n"); |
rdev->accel_working = false; |
return r; |
} |
|
return r; |
} |
|
/* Plan is to move initialization in that function and use |
 * helper functions so that radeon_device_init pretty much does |
 * nothing more than call asic-specific functions.  This |
6963,6 → 7199,17 |
} |
} |
|
r = radeon_vce_init(rdev); |
if (!r) { |
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
|
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX]; |
ring->ring_obj = NULL; |
r600_ring_init(rdev, ring, 4096); |
} |
|
rdev->ih.ring_obj = NULL; |
r600_ih_ring_init(rdev, 64 * 1024); |
|
6975,13 → 7222,14 |
if (r) { |
dev_err(rdev->dev, "disabling GPU acceleration\n"); |
si_cp_fini(rdev); |
// si_irq_fini(rdev); |
// si_rlc_fini(rdev); |
// radeon_wb_fini(rdev); |
// radeon_ib_pool_fini(rdev); |
// radeon_vm_manager_fini(rdev); |
// radeon_irq_kms_fini(rdev); |
// si_pcie_gart_fini(rdev); |
cayman_dma_fini(rdev); |
si_irq_fini(rdev); |
sumo_rlc_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_vm_manager_fini(rdev); |
radeon_irq_kms_fini(rdev); |
si_pcie_gart_fini(rdev); |
rdev->accel_working = false; |
} |
|
6997,6 → 7245,34 |
return 0; |
} |
|
void si_fini(struct radeon_device *rdev) |
{ |
radeon_pm_fini(rdev); |
si_cp_fini(rdev); |
cayman_dma_fini(rdev); |
si_fini_pg(rdev); |
si_fini_cg(rdev); |
si_irq_fini(rdev); |
sumo_rlc_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_vm_manager_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
if (rdev->has_uvd) { |
uvd_v1_0_fini(rdev); |
radeon_uvd_fini(rdev); |
radeon_vce_fini(rdev); |
} |
si_pcie_gart_fini(rdev); |
r600_vram_scratch_fini(rdev); |
radeon_gem_fini(rdev); |
radeon_fence_driver_fini(rdev); |
radeon_bo_fini(rdev); |
radeon_atombios_fini(rdev); |
kfree(rdev->bios); |
rdev->bios = NULL; |
} |
|
/** |
* si_get_gpu_clock_counter - return GPU clock counter snapshot |
* |
7031,8 → 7307,7 |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); |
|
if (!vclk || !dclk) { |
/* keep the Bypass mode, put PLL to sleep */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); |
/* keep the Bypass mode */ |
return 0; |
} |
|
7048,8 → 7323,7 |
/* set VCO_MODE to 1 */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); |
|
/* toggle UPLL_SLEEP to 1 then back to 0 */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); |
/* disable sleep mode */ |
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); |
|
/* deassert UPLL_RESET */ |
7468,3 → 7742,124 |
} |
} |
} |
|
int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev) |
{ |
unsigned i; |
|
/* make sure VCEPLL_CTLREQ is deasserted */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); |
|
mdelay(10); |
|
/* assert UPLL_CTLREQ */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); |
|
/* wait for CTLACK and CTLACK2 to get asserted */ |
for (i = 0; i < 100; ++i) { |
uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; |
if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask) |
break; |
mdelay(10); |
} |
|
/* deassert UPLL_CTLREQ */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); |
|
if (i == 100) { |
DRM_ERROR("Timeout setting UVD clocks!\n"); |
return -ETIMEDOUT; |
} |
|
return 0; |
} |
|
int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk) |
{ |
unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0; |
int r; |
|
/* bypass evclk and ecclk with bclk */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2, |
EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1), |
~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK)); |
|
/* put PLL in bypass mode */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK, |
~VCEPLL_BYPASS_EN_MASK); |
|
if (!evclk || !ecclk) { |
/* keep the Bypass mode, put PLL to sleep */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK, |
~VCEPLL_SLEEP_MASK); |
return 0; |
} |
|
r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000, |
16384, 0x03FFFFFF, 0, 128, 5, |
&fb_div, &evclk_div, &ecclk_div); |
if (r) |
return r; |
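 |
	/* divider selection is delegated to the UPLL helper shared with |
	 * UVD: it searches for a feedback divider (scaled by 16384 and |
	 * masked to 26 bits) plus per-clock post dividers up to 128 that |
	 * keep the VCO within the given range for both evclk and ecclk |
	 */ |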
|
/* set RESET_ANTI_MUX to 0 */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK); |
|
/* set VCO_MODE to 1 */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK, |
~VCEPLL_VCO_MODE_MASK); |
|
/* toggle VCEPLL_SLEEP to 1 then back to 0 */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK, |
~VCEPLL_SLEEP_MASK); |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK); |
|
/* deassert VCEPLL_RESET */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK); |
|
mdelay(1); |
|
r = si_vce_send_vcepll_ctlreq(rdev); |
if (r) |
return r; |
|
/* assert VCEPLL_RESET again */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK); |
|
/* disable spread spectrum. */ |
WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK); |
|
/* set feedback divider */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK); |
|
/* set ref divider to 0 */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK); |
|
/* set PDIV_A and PDIV_B */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2, |
VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div), |
~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK)); |
|
/* give the PLL some time to settle */ |
mdelay(15); |
|
	/* deassert VCEPLL_RESET */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK); |
|
mdelay(15); |
|
/* switch from bypass mode to normal mode */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK); |
|
r = si_vce_send_vcepll_ctlreq(rdev); |
if (r) |
return r; |
|
	/* switch EVCLK and ECCLK selection */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2, |
EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16), |
~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK)); |
|
mdelay(100); |
|
return 0; |
} |
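 |
/* Illustrative usage (an assumption, not taken from this patch): the |
 * power-management code would raise the clocks while VCE is busy and |
 * return the PLL to bypass/sleep afterwards.  Clocks are in radeon's |
 * usual 10 kHz units, so e.g.: |
 * |
 *	si_set_vce_clocks(rdev, 80000, 80000);	(800 MHz ev/ecclk) |
 *	... submit encode work ... |
 *	si_set_vce_clocks(rdev, 0, 0);		(back to bypass/sleep) |
 */ |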