Subversion Repositories Kolibri OS

Compare Revisions

Rev 5271 → Rev 6104

/drivers/video/drm/radeon/cik.c
27,6 → 27,7
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
140,6 → 141,64
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
bool enable);
 
/**
* cik_get_allowed_info_register - fetch the register for the info ioctl
*
* @rdev: radeon_device pointer
* @reg: register offset in bytes
* @val: register value
*
* Returns 0 for success or -EINVAL for an invalid register
*
*/
int cik_get_allowed_info_register(struct radeon_device *rdev,
u32 reg, u32 *val)
{
switch (reg) {
case GRBM_STATUS:
case GRBM_STATUS2:
case GRBM_STATUS_SE0:
case GRBM_STATUS_SE1:
case GRBM_STATUS_SE2:
case GRBM_STATUS_SE3:
case SRBM_STATUS:
case SRBM_STATUS2:
case (SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET):
case (SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET):
case UVD_STATUS:
/* TODO VCE */
*val = RREG32(reg);
return 0;
default:
return -EINVAL;
}
}
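 
/* Usage sketch (illustrative, not part of this revision): the info
 * ioctl is expected to route userspace register queries through the
 * whitelist helper above, returning a value only for the listed
 * status registers. The wrapper below is a hypothetical caller. */
static int query_status_register(struct radeon_device *rdev, u32 reg, u32 *out)
{
	int r = cik_get_allowed_info_register(rdev, reg, out);

	if (r)
		DRM_DEBUG("register 0x%04x is not readable via the info ioctl\n", reg);
	return r;
}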
 
/*
* Indirect registers accessor
*/
u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
{
unsigned long flags;
u32 r;
 
spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
r = RREG32(CIK_DIDT_IND_DATA);
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
return r;
}
 
void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
unsigned long flags;
 
spin_lock_irqsave(&rdev->didt_idx_lock, flags);
WREG32(CIK_DIDT_IND_INDEX, (reg));
WREG32(CIK_DIDT_IND_DATA, (v));
spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
}
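 
/* Usage sketch (illustrative): a read-modify-write through the DIDT
 * index/data pair above. The offset and mask below are placeholders,
 * not DIDT registers defined by this driver. */
#define DIDT_EXAMPLE_CTRL	0x10
#define DIDT_EXAMPLE_MASK	0x000000ff

static void cik_didt_update_example(struct radeon_device *rdev, u32 bits)
{
	u32 tmp;

	tmp = cik_didt_rreg(rdev, DIDT_EXAMPLE_CTRL);
	tmp &= ~DIDT_EXAMPLE_MASK;
	tmp |= bits & DIDT_EXAMPLE_MASK;
	cik_didt_wreg(rdev, DIDT_EXAMPLE_CTRL, tmp);
}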
 
/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
{
3612,6 → 3671,8
}
 
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
WREG32(SRBM_INT_CNTL, 0x1);
WREG32(SRBM_INT_ACK, 0x1);
 
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
3904,7 → 3965,9
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 
/* EVENT_WRITE_EOP - flush caches, send int */
/* Workaround for cache flush problems. First send a dummy EOP
* event down the pipe with seq one below.
*/
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
3911,6 → 3974,18
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
DATA_SEL(1) | INT_SEL(0));
radeon_ring_write(ring, fence->seq - 1);
radeon_ring_write(ring, 0);
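/* Note: the dummy packet above uses INT_SEL(0), so no interrupt is
 * raised, and it writes seq - 1 rather than the final sequence value,
 * so waiters polling the fence address cannot mistake it for the real
 * signal emitted below. */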
 
/* Then send the real EOP event down the pipe. */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
radeon_ring_write(ring, addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
4098,11 → 4173,7
control |= ib->length_dw | (vm_id << 24);
 
radeon_ring_write(ring, header);
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
radeon_ring_write(ring, control);
}
4529,6 → 4600,31
WDOORBELL32(ring->doorbell_index, ring->wptr);
}
 
static void cik_compute_stop(struct radeon_device *rdev,
struct radeon_ring *ring)
{
u32 j, tmp;
 
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
/* Disable wptr polling. */
tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
tmp &= ~WPTR_POLL_EN;
WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
/* Disable HQD. */
if (RREG32(CP_HQD_ACTIVE) & 1) {
WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < rdev->usec_timeout; j++) {
if (!(RREG32(CP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
WREG32(CP_HQD_PQ_RPTR, 0);
WREG32(CP_HQD_PQ_WPTR, 0);
}
cik_srbm_select(rdev, 0, 0, 0, 0);
}
 
/**
* cik_cp_compute_enable - enable/disable the compute CP MEs
*
4542,6 → 4638,15
if (enable)
WREG32(CP_MEC_CNTL, 0);
else {
/*
* To make hibernation reliable we need to clear compute ring
* configuration before halting the compute ring.
*/
mutex_lock(&rdev->srbm_mutex);
cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
mutex_unlock(&rdev->srbm_mutex);
 
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
5707,6 → 5812,28
WREG32(VM_INVALIDATE_REQUEST, 0x1);
}
 
static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
{
int i;
uint32_t sh_mem_bases, sh_mem_config;
 
sh_mem_bases = 0x6000 | 0x6000 << 16;
sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
 
mutex_lock(&rdev->srbm_mutex);
for (i = 8; i < 16; i++) {
cik_srbm_select(rdev, 0, 0, 0, i);
/* CP and shaders */
WREG32(SH_MEM_CONFIG, sh_mem_config);
WREG32(SH_MEM_APE1_BASE, 1);
WREG32(SH_MEM_APE1_LIMIT, 0);
WREG32(SH_MEM_BASES, sh_mem_bases);
}
cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
}
 
/**
* cik_pcie_gart_enable - gart enable
*
5765,7 → 5892,7
/* restore context1-15 */
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
5820,6 → 5947,8
cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
 
cik_pcie_init_compute_vmid(rdev);
 
cik_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
6033,6 → 6162,17
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
 
/* wait for the invalidate to complete */
radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
WAIT_REG_MEM_FUNCTION(0) | /* always */
WAIT_REG_MEM_ENGINE(0))); /* me */
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0); /* ref */
radeon_ring_write(ring, 0); /* mask */
radeon_ring_write(ring, 0x20); /* poll interval */
 
/* compute doesn't have PFP */
if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
7180,6 → 7320,8
WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
/* grbm */
WREG32(GRBM_INT_CNTL, 0);
/* SRBM */
WREG32(SRBM_INT_CNTL, 0);
/* vline/vblank, etc. */
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
7323,7 → 7465,6
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 dma_cntl, dma_cntl1;
u32 thermal_int;
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
7341,12 → 7482,12
(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
 
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
 
dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
7353,13 → 7494,6
 
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
 
if (rdev->flags & RADEON_IS_IGP)
thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
~(THERM_INTH_MASK | THERM_INTL_MASK);
else
thermal_int = RREG32_SMC(CG_THERMAL_INT) &
~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
 
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("cik_irq_set: sw int gfx\n");
7440,37 → 7574,29
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("cik_irq_set: hpd 1\n");
hpd1 |= DC_HPDx_INT_EN;
hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("cik_irq_set: hpd 2\n");
hpd2 |= DC_HPDx_INT_EN;
hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("cik_irq_set: hpd 3\n");
hpd3 |= DC_HPDx_INT_EN;
hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("cik_irq_set: hpd 4\n");
hpd4 |= DC_HPDx_INT_EN;
hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("cik_irq_set: hpd 5\n");
hpd5 |= DC_HPDx_INT_EN;
hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("cik_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
}
 
if (rdev->irq.dpm_thermal) {
DRM_DEBUG("dpm thermal\n");
if (rdev->flags & RADEON_IS_IGP)
thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
else
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
}
 
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
 
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
7517,10 → 7643,8
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
 
if (rdev->flags & RADEON_IS_IGP)
WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
else
WREG32_SMC(CG_THERMAL_INT, thermal_int);
/* posting read */
RREG32(SRBM_STATUS);
 
return 0;
}
7642,7 → 7766,37
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD1_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD2_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD3_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
 
/**
* cik_irq_disable - disable interrupts
7767,6 → 7921,7
u8 me_id, pipe_id, queue_id;
u32 ring_index;
bool queue_hotplug = false;
bool queue_dp = false;
bool queue_reset = false;
u32 addr, status, mc_client;
bool queue_thermal = false;
7805,19 → 7960,27
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
if (rdev->irq.crtc_vblank_int[0]) {
drm_handle_vblank(rdev->ddev, 0);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_vblank(rdev, 0);
rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
 
break;
case 1: /* D1 vline */
if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
 
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7827,19 → 7990,27
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
if (rdev->irq.crtc_vblank_int[1]) {
drm_handle_vblank(rdev->ddev, 1);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_vblank(rdev, 1);
rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
 
break;
case 1: /* D2 vline */
if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
 
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7849,19 → 8020,27
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
if (rdev->irq.crtc_vblank_int[2]) {
drm_handle_vblank(rdev->ddev, 2);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_vblank(rdev, 2);
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
 
break;
case 1: /* D3 vline */
if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
DRM_DEBUG("IH: D3 vline\n");
}
 
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7871,19 → 8050,27
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
if (rdev->irq.crtc_vblank_int[3]) {
drm_handle_vblank(rdev->ddev, 3);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_vblank(rdev, 3);
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
 
break;
case 1: /* D4 vline */
if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
DRM_DEBUG("IH: D4 vline\n");
}
 
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7893,19 → 8080,27
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
if (rdev->irq.crtc_vblank_int[4]) {
drm_handle_vblank(rdev->ddev, 4);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_vblank(rdev, 4);
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
 
break;
case 1: /* D5 vline */
if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
DRM_DEBUG("IH: D5 vline\n");
}
 
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7915,19 → 8110,27
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
if (rdev->irq.crtc_vblank_int[5]) {
drm_handle_vblank(rdev->ddev, 5);
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_vblank(rdev, 5);
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
 
break;
case 1: /* D6 vline */
if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
DRM_DEBUG("IH: D6 vline\n");
}
 
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7945,52 → 8148,122
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
 
break;
case 1:
if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
 
break;
case 2:
if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
 
break;
case 3:
if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
 
break;
case 4:
if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
 
break;
case 5:
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
 
break;
case 6:
if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
queue_dp = true;
DRM_DEBUG("IH: HPD_RX 1\n");
 
break;
case 7:
if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
queue_dp = true;
DRM_DEBUG("IH: HPD_RX 2\n");
 
break;
case 8:
if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
queue_dp = true;
DRM_DEBUG("IH: HPD_RX 3\n");
 
break;
case 9:
if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
queue_dp = true;
DRM_DEBUG("IH: HPD_RX 4\n");
 
break;
case 10:
if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
queue_dp = true;
DRM_DEBUG("IH: HPD_RX 5\n");
 
break;
case 11:
if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
 
rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
queue_dp = true;
DRM_DEBUG("IH: HPD_RX 6\n");
 
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 96:
DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
WREG32(SRBM_INT_ACK, 0x1);
break;
case 124: /* UVD */
DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
8319,6 → 8592,16
if (r)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
 
r = radeon_vce_resume(rdev);
if (!r) {
r = vce_v2_0_resume(rdev);
if (!r)
r = radeon_fence_driver_start_ring(rdev,
TN_RING_TYPE_VCE1_INDEX);
if (!r)
r = radeon_fence_driver_start_ring(rdev,
TN_RING_TYPE_VCE2_INDEX);
}
if (r) {
dev_err(rdev->dev, "VCE init error (%d).\n", r);
rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
8408,6 → 8691,24
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
 
r = -ENOENT;
 
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
if (ring->ring_size)
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
VCE_CMD_NO_OP);
 
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
if (ring->ring_size)
r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
VCE_CMD_NO_OP);
 
if (!r)
r = vce_v1_0_init(rdev);
else if (r != -ENOENT)
DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
8551,6 → 8852,18
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 4096);
}
 
r = radeon_vce_init(rdev);
if (!r) {
ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 4096);
 
ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 4096);
}
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
 
8562,6 → 8875,16
r = cik_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_irq_fini(rdev);
sumo_rlc_fini(rdev);
cik_mec_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
cik_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
 
8588,6 → 8911,27
*/
void cik_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_fini_pg(rdev);
cik_fini_cg(rdev);
cik_irq_fini(rdev);
sumo_rlc_fini(rdev);
cik_mec_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
radeon_vce_fini(rdev);
cik_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
}
9205,6 → 9549,9
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
 
/* Save number of lines the linebuffer leads before the scanout */
radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
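/* Worked example (illustrative numbers, assuming lb_size is in pixels):
 * a 3840 pixel line buffer partition with a 1920 pixel wide mode gives
 * DIV_ROUND_UP(3840, 1920) = 2, i.e. the line buffer runs two scanout
 * lines ahead of the CRTC. */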
}
 
/* select wm A */