Subversion Repositories Kolibri OS

Compare Revisions

Rev 5271 → Rev 5270

/drivers/video/drm/drm_atomic.c
File deleted
/drivers/video/drm/drm_legacy.h
File deleted
/drivers/video/drm/drm_internal.h
File deleted
/drivers/video/drm/radeon/radeon_kfd.h
File deleted
/drivers/video/drm/radeon/radeon_sync.c
File deleted
/drivers/video/drm/radeon/cik.c
32,7 → 32,6
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"
#include "radeon_kfd.h"
 
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
1564,8 → 1563,6
 
static void cik_init_golden_registers(struct radeon_device *rdev)
{
/* Some of the registers might be dependent on GRBM_GFX_INDEX */
mutex_lock(&rdev->grbm_idx_mutex);
switch (rdev->family) {
case CHIP_BONAIRE:
radeon_program_register_sequence(rdev,
1640,7 → 1637,6
default:
break;
}
mutex_unlock(&rdev->grbm_idx_mutex);
}
 
/**
1810,7 → 1806,7
{
const __be32 *fw_data = NULL;
const __le32 *new_fw_data = NULL;
u32 running, blackout = 0, tmp;
u32 running, blackout = 0;
u32 *io_mc_regs = NULL;
const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
1870,15 → 1866,6
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
}
 
tmp = RREG32(MC_SEQ_MISC0);
if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) {
WREG32(MC_SEQ_IO_DEBUG_INDEX, 5);
WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023);
WREG32(MC_SEQ_IO_DEBUG_INDEX, 9);
WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0);
}
 
/* load the MC ucode */
for (i = 0; i < ucode_size; i++) {
if (rdev->new_fw)
3432,7 → 3419,6
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
 
mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
3444,7 → 3430,6
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mutex_unlock(&rdev->grbm_idx_mutex);
 
mask = 1;
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3455,7 → 3440,6
 
rdev->config.cik.backend_enable_mask = enabled_rbs;
 
mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
3483,7 → 3467,6
WREG32(PA_SC_RASTER_CONFIG, data);
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mutex_unlock(&rdev->grbm_idx_mutex);
}
 
/**
3701,12 → 3684,6
/* set HW defaults for 3D engine */
WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
 
mutex_lock(&rdev->grbm_idx_mutex);
/*
* make sure that the following register writes will be broadcast
* to all the shaders
*/
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(SX_DEBUG_1, 0x20);
 
WREG32(TA_CNTL_AUX, 0x00010000);
3762,7 → 3739,6
 
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
mutex_unlock(&rdev->grbm_idx_mutex);
 
udelay(50);
}
3983,19 → 3959,18
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
* @resv: reservation object to sync to
* @fence: radeon fence object
*
* Copy GPU paging using the CP DMA engine (CIK+).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
int cik_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv)
struct radeon_fence **fence)
{
struct radeon_fence *fence;
struct radeon_sync sync;
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, control;
4002,7 → 3977,11
int i, num_loops;
int r = 0;
 
radeon_sync_create(&sync);
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
4009,12 → 3988,12
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_sync_resv(rdev, &sync, resv, false);
radeon_sync_rings(rdev, &sync, ring->idx);
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
4035,17 → 4014,17
dst_offset += cur_size_in_bytes;
}
 
r = radeon_fence_emit(rdev, &fence, ring->idx);
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_sync_free(rdev, &sync, fence);
radeon_semaphore_free(rdev, &sem, *fence);
 
return fence;
return r;
}
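The hunk above captures the sync API swap between revisions: the r5271 side returns a struct radeon_fence * (or an ERR_PTR) and syncs against a reservation_object through the radeon_sync helpers, while the r5270 side returns an int and hands the fence back through a struct radeon_fence **, using a radeon_semaphore for inter-ring sync. A minimal caller-side sketch against the r5270 signature (src, dst and num_gpu_pages are placeholders):

	struct radeon_fence *fence = NULL;
	int r;

	/* queue the copy on the CP DMA engine and get a fence for it */
	r = cik_copy_cpdma(rdev, src, dst, num_gpu_pages, &fence);
	if (r)
		return r;

	/* block until the transfer is done, then drop the reference */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;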
 
/*
4066,7 → 4045,6
void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 header, control = INDIRECT_BUFFER_VALID;
 
if (ib->is_const_ib) {
4095,7 → 4073,8
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
}
 
control |= ib->length_dw | (vm_id << 24);
control |= ib->length_dw |
(ib->vm ? (ib->vm->id << 24) : 0);
 
radeon_ring_write(ring, header);
radeon_ring_write(ring,
4255,7 → 4234,7
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, le32_to_cpu(pfp_hdr->header.ucode_version));
WREG32(CP_PFP_UCODE_ADDR, 0);
 
/* CE */
fw_data = (const __le32 *)
4264,7 → 4243,7
WREG32(CP_CE_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(CP_CE_UCODE_ADDR, le32_to_cpu(ce_hdr->header.ucode_version));
WREG32(CP_CE_UCODE_ADDR, 0);
 
/* ME */
fw_data = (const __be32 *)
4273,8 → 4252,7
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
WREG32(CP_ME_RAM_WADDR, le32_to_cpu(me_hdr->header.ucode_version));
WREG32(CP_ME_RAM_RADDR, le32_to_cpu(me_hdr->header.ucode_version));
WREG32(CP_ME_RAM_WADDR, 0);
} else {
const __be32 *fw_data;
 
4300,6 → 4278,10
WREG32(CP_ME_RAM_WADDR, 0);
}
 
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
WREG32(CP_ME_RAM_WADDR, 0);
WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
 
4333,8 → 4315,8
/* init the CE partitions. CE only used for gfx on CIK */
radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
radeon_ring_write(ring, 0x8000);
radeon_ring_write(ring, 0x8000);
radeon_ring_write(ring, 0xc000);
radeon_ring_write(ring, 0xc000);
 
/* setup clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
4581,7 → 4563,7
WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(CP_MEC_ME1_UCODE_ADDR, le32_to_cpu(mec_hdr->header.ucode_version));
WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
 
/* MEC2 */
if (rdev->family == CHIP_KAVERI) {
4595,7 → 4577,7
WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(CP_MEC_ME2_UCODE_ADDR, le32_to_cpu(mec2_hdr->header.ucode_version));
WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
}
} else {
const __be32 *fw_data;
4695,11 → 4677,12
/*
* KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
* CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
* Nonetheless, we assign only 1 pipe because all other pipes will
* be handled by KFD
*/
if (rdev->family == CHIP_KAVERI)
rdev->mec.num_mec = 2;
else
rdev->mec.num_mec = 1;
rdev->mec.num_pipe = 1;
rdev->mec.num_pipe = 4;
rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
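The comment above is plain arithmetic: queues = num_mec * num_pipe * 8. A worked sketch of the two configurations in this hunk (the r5271 side keeps one pipe for radeon and leaves the rest to KFD, the r5270 side drives all four):

	/* KAVERI: 2 MEC * 4 pipes * 8 queues = 64; CI/KB: 1 * 4 * 8 = 32 */
	unsigned num_mec   = (rdev->family == CHIP_KAVERI) ? 2 : 1;
	unsigned num_pipe  = 4;	/* r5271 with KFD uses 1 */
	unsigned num_queue = num_mec * num_pipe * 8;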
 
if (rdev->mec.hpd_eop_obj == NULL) {
4706,7 → 4689,7
r = radeon_bo_create(rdev,
rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->mec.hpd_eop_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
4841,10 → 4824,13
 
/* init the pipes */
mutex_lock(&rdev->srbm_mutex);
for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
int me = (i < 4) ? 1 : 2;
int pipe = (i < 4) ? i : (i - 4);
 
eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr;
eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
 
cik_srbm_select(rdev, 0, 0, 0, 0);
cik_srbm_select(rdev, me, pipe, 0, 0);
 
/* write the EOP addr */
WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
4858,7 → 4844,8
tmp &= ~EOP_SIZE_MASK;
tmp |= order_base_2(MEC_HPD_SIZE / 8);
WREG32(CP_HPD_EOP_CONTROL, tmp);
 
}
cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
 
/* init the queues. Just two for now. */
4873,7 → 4860,7
sizeof(struct bonaire_mqd),
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0, NULL,
NULL, &rdev->ring[idx].mqd_obj);
&rdev->ring[idx].mqd_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
return r;
5912,13 → 5899,8
*/
int cik_vm_init(struct radeon_device *rdev)
{
/*
* number of VMs
* VMID 0 is reserved for System
* radeon graphics/compute will use VMIDs 1-7
* amdkfd will use VMIDs 8-15
*/
rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS;
/* number of VMs */
rdev->vm_manager.nvm = 16;
/* base offset of vram pages */
if (rdev->flags & RADEON_IS_IGP) {
u64 tmp = RREG32(MC_VM_FB_OFFSET);
5978,23 → 5960,26
* Update the page table base and flush the VM TLB
* using the CP (CIK).
*/
void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX);
struct radeon_ring *ring = &rdev->ring[ridx];
int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
 
if (vm == NULL)
return;
 
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
if (vm_id < 8) {
if (vm->id < 8) {
radeon_ring_write(ring,
(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
} else {
radeon_ring_write(ring,
(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
}
radeon_ring_write(ring, 0);
radeon_ring_write(ring, pd_addr >> 12);
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* update SH_MEM_* regs */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6002,7 → 5987,7
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, VMID(vm_id));
radeon_ring_write(ring, VMID(vm->id));
 
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
6023,7 → 6008,7
radeon_ring_write(ring, VMID(0));
 
/* HDP flush */
cik_hdp_flush_cp_ring_emit(rdev, ring->idx);
cik_hdp_flush_cp_ring_emit(rdev, ridx);
 
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6031,7 → 6016,7
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
radeon_ring_write(ring, 1 << vm->id);
 
/* compute doesn't have PFP */
if (usepfp) {
6076,7 → 6061,6
u32 i, j, k;
u32 mask;
 
mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
6088,7 → 6072,6
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mutex_unlock(&rdev->grbm_idx_mutex);
 
mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
for (k = 0; k < rdev->usec_timeout; k++) {
6223,12 → 6206,10
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_CNTR_MAX, 0x00008000);
 
mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(RLC_LB_PARAMS, 0x00600408);
WREG32(RLC_LB_CNTL, 0x80000004);
mutex_unlock(&rdev->grbm_idx_mutex);
 
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
6245,7 → 6226,7
WREG32(RLC_GPM_UCODE_ADDR, 0);
for (i = 0; i < size; i++)
WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(RLC_GPM_UCODE_ADDR, le32_to_cpu(hdr->header.ucode_version));
WREG32(RLC_GPM_UCODE_ADDR, 0);
} else {
const __be32 *fw_data;
 
6295,13 → 6276,11
 
tmp = cik_halt_rlc(rdev);
 
mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
WREG32(RLC_SERDES_WR_CTRL, tmp2);
mutex_unlock(&rdev->grbm_idx_mutex);
 
cik_update_rlc(rdev, tmp);
 
6337,7 → 6316,6
}
 
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
data |= 0x00000001;
data &= 0xfffffffd;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
6344,13 → 6322,11
 
tmp = cik_halt_rlc(rdev);
 
mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
WREG32(RLC_SERDES_WR_CTRL, data);
mutex_unlock(&rdev->grbm_idx_mutex);
 
cik_update_rlc(rdev, tmp);
 
6371,7 → 6347,7
}
} else {
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
data |= 0x00000003;
data |= 0x00000002;
if (orig != data)
WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
 
6394,13 → 6370,11
 
tmp = cik_halt_rlc(rdev);
 
mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
WREG32(RLC_SERDES_WR_CTRL, data);
mutex_unlock(&rdev->grbm_idx_mutex);
 
cik_update_rlc(rdev, tmp);
}
6829,12 → 6803,10
u32 mask = 0, tmp, tmp1;
int i;
 
mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, se, sh);
tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mutex_unlock(&rdev->grbm_idx_mutex);
 
tmp &= 0xffff0000;
 
7318,7 → 7290,8
int cik_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl;
u32 cp_m1p0;
u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
7352,6 → 7325,13
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
 
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
 
if (rdev->flags & RADEON_IS_IGP)
thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
7373,10 → 7353,37
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
case 1:
cp_m1p1 |= TIME_STAMP_INT_ENABLE;
break;
case 2:
cp_m1p2 |= TIME_STAMP_INT_ENABLE;
break;
case 3:
cp_m1p3 |= TIME_STAMP_INT_ENABLE;
break;
default:
DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
break;
}
} else if (ring->me == 2) {
switch (ring->pipe) {
case 0:
cp_m2p0 |= TIME_STAMP_INT_ENABLE;
break;
case 1:
cp_m2p1 |= TIME_STAMP_INT_ENABLE;
break;
case 2:
cp_m2p2 |= TIME_STAMP_INT_ENABLE;
break;
case 3:
cp_m2p3 |= TIME_STAMP_INT_ENABLE;
break;
default:
DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
break;
}
} else {
DRM_DEBUG("si_irq_set: sw int cp1 invalid me %d\n", ring->me);
}
7389,10 → 7396,37
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
case 1:
cp_m1p1 |= TIME_STAMP_INT_ENABLE;
break;
case 2:
cp_m1p2 |= TIME_STAMP_INT_ENABLE;
break;
case 3:
cp_m1p3 |= TIME_STAMP_INT_ENABLE;
break;
default:
DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
break;
}
} else if (ring->me == 2) {
switch (ring->pipe) {
case 0:
cp_m2p0 |= TIME_STAMP_INT_ENABLE;
break;
case 1:
cp_m2p1 |= TIME_STAMP_INT_ENABLE;
break;
case 2:
cp_m2p2 |= TIME_STAMP_INT_ENABLE;
break;
case 3:
cp_m2p3 |= TIME_STAMP_INT_ENABLE;
break;
default:
DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
break;
}
} else {
DRM_DEBUG("si_irq_set: sw int cp2 invalid me %d\n", ring->me);
}
7477,6 → 7511,13
WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
 
WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
 
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
7793,10 → 7834,6
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
 
// radeon_kfd_interrupt(rdev,
// (const void *) &rdev->ih.ring[ring_index]);
 
src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
8420,10 → 8457,6
return r;
}
 
// r = radeon_kfd_resume(rdev);
// if (r)
// return r;
 
return 0;
}
 
9247,9 → 9280,6
u32 num_heads = 0, lb_size;
int i;
 
if (!rdev->mode_info.mode_config_initialized)
return;
 
radeon_update_display_priority(rdev);
 
for (i = 0; i < rdev->num_crtc; i++) {
/drivers/video/drm/radeon/cik_sdma.c
134,7 → 134,7
struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
 
if (rdev->wb.enabled) {
u32 next_rptr = ring->wptr + 5;
530,19 → 530,18
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
* @resv: reservation object to sync to
* @fence: radeon fence object
*
* Copy GPU paging using the DMA engine (CIK).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
int cik_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv)
struct radeon_fence **fence)
{
struct radeon_fence *fence;
struct radeon_sync sync;
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes;
549,7 → 548,11
int i, num_loops;
int r = 0;
 
radeon_sync_create(&sync);
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
556,12 → 559,12
r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_sync_resv(rdev, &sync, resv, false);
radeon_sync_rings(rdev, &sync, ring->idx);
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
579,17 → 582,17
dst_offset += cur_size_in_bytes;
}
 
r = radeon_fence_emit(rdev, &fence, ring->idx);
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_sync_free(rdev, &sync, fence);
radeon_semaphore_free(rdev, &sem, *fence);
 
return fence;
return r;
}
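Callers normally reach this function through the asic copy table rather than directly; with the r5270 signatures the radeon_copy_dma() macro (see the radeon.h hunks later in this diff) expands to the fence-pointer form. A hedged usage sketch:

	struct radeon_fence *fence = NULL;
	int r;

	/* dispatches to cik_copy_dma() on CIK parts via rdev->asic->copy.dma */
	r = radeon_copy_dma(rdev, src, dst, num_gpu_pages, &fence);
	if (!r)
		r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);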
 
/**
663,20 → 666,17
{
struct radeon_ib ib;
unsigned i;
unsigned index;
int r;
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp = 0;
u64 gpu_addr;
 
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
index = R600_WB_DMA_RING_TEST_OFFSET;
else
index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
if (!ptr) {
DRM_ERROR("invalid vram scratch pointer\n");
return -EINVAL;
}
 
gpu_addr = rdev->wb.gpu_addr + index;
 
tmp = 0xCAFEDEAD;
rdev->wb.wb[index/4] = cpu_to_le32(tmp);
writel(tmp, ptr);
 
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
685,8 → 685,8
}
 
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
703,7 → 703,7
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = le32_to_cpu(rdev->wb.wb[index/4]);
tmp = readl(ptr);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
900,21 → 900,25
* Update the page table base and flush the VM TLB
* using sDMA (CIK).
*/
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
if (vm == NULL)
return;
 
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
if (vm_id < 8) {
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
if (vm->id < 8) {
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
} else {
radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
}
radeon_ring_write(ring, pd_addr >> 12);
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* update SH_MEM_* regs */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
radeon_ring_write(ring, VMID(vm_id));
radeon_ring_write(ring, VMID(vm->id));
 
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, SH_MEM_BASES >> 2);
937,11 → 941,11
radeon_ring_write(ring, VMID(0));
 
/* flush HDP */
cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);
cik_sdma_hdp_flush_ring_emit(rdev, ridx);
 
/* flush TLB */
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 1 << vm_id);
radeon_ring_write(ring, 1 << vm->id);
}
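Both vm_flush implementations (the CP path in cik.c above, the sDMA path here) are invoked through the per-ring asic callback. A sketch of the call site, paraphrased from the r5270-era radeon_vm_flush() and the radeon_ring_vm_flush() macro defined in the radeon.h hunks of this diff:

	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

	/* only emit the flush when the page directory actually moved */
	if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
		vm->pd_gpu_addr = pd_addr;
		/* -> cik_dma_vm_flush() when ring is the sDMA ring */
		radeon_ring_vm_flush(rdev, ring, vm);
	}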
 
/drivers/video/drm/radeon/dce3_1_afmt.c
32,7 → 32,7
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
u8 *sadb;
int sad_count;
 
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
165,7 → 165,7
 
/* disable audio prior to setting up hw */
dig->afmt->pin = r600_audio_get_pin(rdev);
r600_audio_enable(rdev, dig->afmt->pin, 0);
r600_audio_enable(rdev, dig->afmt->pin, false);
 
r600_audio_set_dto(encoder, mode->clock);
 
240,5 → 240,5
r600_hdmi_audio_workaround(encoder);
 
/* enable audio after setting up hw */
r600_audio_enable(rdev, dig->afmt->pin, 0xf);
r600_audio_enable(rdev, dig->afmt->pin, true);
}
/drivers/video/drm/radeon/dce6_afmt.c
155,7 → 155,7
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
u32 offset, tmp;
u8 *sadb = NULL;
u8 *sadb;
int sad_count;
 
if (!dig || !dig->afmt || !dig->afmt->pin)
284,13 → 284,13
 
void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
u8 enable_mask)
bool enable)
{
if (!pin)
return;
 
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
enable_mask ? AUDIO_ENABLED : 0);
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
enable ? AUDIO_ENABLED : 0);
}
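The r5270 side takes a plain bool, the r5271 side a per-call enable mask, and the call sites elsewhere in this diff change in step (compare dce3_1_afmt.c above and evergreen_hdmi.c below). The two forms side by side:

	/* r5270: simple on/off */
	dce6_audio_enable(rdev, dig->afmt->pin, true);

	/* r5271: bitmask; 0xf enables, 0 disables */
	dce6_audio_enable(rdev, dig->afmt->pin, 0xf);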
 
static const u32 pin_offsets[7] =
/drivers/video/drm/radeon/evergreen.c
22,6 → 22,7
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
2345,9 → 2346,6
u32 num_heads = 0, lb_size;
int i;
 
if (!rdev->mode_info.mode_config_initialized)
return;
 
radeon_update_display_priority(rdev);
 
for (i = 0; i < rdev->num_crtc; i++) {
2555,7 → 2553,6
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
3009,7 → 3006,7
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
u32 disabled_rb_mask;
int i, j, ps_thread_count;
int i, j, num_shader_engines, ps_thread_count;
 
switch (rdev->family) {
case CHIP_CYPRESS:
3307,6 → 3304,8
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
 
num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
 
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
u32 efuse_straps_4;
u32 efuse_straps_3;
4024,7 → 4023,7
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &rdev->rlc.save_restore_obj);
&rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
4103,7 → 4102,7
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &rdev->rlc.clear_state_obj);
&rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
4180,7 → 4179,7
r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &rdev->rlc.cp_table_obj);
&rdev->rlc.cp_table_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
5136,9 → 5135,9
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
WREG32(IH_RB_RPTR, rptr);
}
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
 
/* make sure wptr hasn't changed while processing */
/drivers/video/drm/radeon/evergreen_hdmi.c
38,37 → 38,6
extern void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode);
 
/* enable the audio stream */
static void dce4_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
u8 enable_mask)
{
u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
 
if (!pin)
return;
 
if (enable_mask) {
tmp |= AUDIO_ENABLED;
if (enable_mask & 1)
tmp |= PIN0_AUDIO_ENABLED;
if (enable_mask & 2)
tmp |= PIN1_AUDIO_ENABLED;
if (enable_mask & 4)
tmp |= PIN2_AUDIO_ENABLED;
if (enable_mask & 8)
tmp |= PIN3_AUDIO_ENABLED;
} else {
tmp &= ~(AUDIO_ENABLED |
PIN0_AUDIO_ENABLED |
PIN1_AUDIO_ENABLED |
PIN2_AUDIO_ENABLED |
PIN3_AUDIO_ENABLED);
}
 
WREG32(AZ_HOT_PLUG_CONTROL, tmp);
}
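In the r5271-only dce4_audio_enable() above, the mask selects individual pins: bit 0 maps to PIN0_AUDIO_ENABLED, bit 1 to PIN1, and so on, and any nonzero mask also sets the global AUDIO_ENABLED bit. For example:

	dce4_audio_enable(rdev, pin, 0x1);	/* pin 0 only */
	dce4_audio_enable(rdev, pin, 0xf);	/* all four pins, as the call sites below do */
	dce4_audio_enable(rdev, pin, 0);	/* tear everything down */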
 
/*
* update the N and CTS parameters for a given pixel clock rate
*/
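The relation behind this helper is the HDMI audio clock regeneration formula, 128 * fs = f_TMDS * N / CTS, so CTS = f_TMDS * N / (128 * fs). A hedged sketch with a hypothetical helper name, using the spec-recommended N = 6144 for 48 kHz audio:

	/* hypothetical helper: CTS = (f_TMDS * N) / (128 * fs) */
	static u32 hdmi_acr_cts(u32 clock_khz, u32 n, u32 fs_hz)
	{
		return div_u64((u64)clock_khz * 1000 * n, 128 * fs_hz);
	}

	/* e.g. a 74.25 MHz TMDS clock at 48 kHz: hdmi_acr_cts(74250, 6144, 48000) == 74250 */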
133,7 → 102,7
struct drm_connector *connector;
struct radeon_connector *radeon_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
u8 *sadb;
int sad_count;
 
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
349,10 → 318,10
/* disable audio prior to setting up hw */
if (ASIC_IS_DCE6(rdev)) {
dig->afmt->pin = dce6_audio_get_pin(rdev);
dce6_audio_enable(rdev, dig->afmt->pin, 0);
dce6_audio_enable(rdev, dig->afmt->pin, false);
} else {
dig->afmt->pin = r600_audio_get_pin(rdev);
dce4_audio_enable(rdev, dig->afmt->pin, 0);
r600_audio_enable(rdev, dig->afmt->pin, false);
}
 
evergreen_audio_set_dto(encoder, mode->clock);
494,15 → 463,13
 
/* enable audio after setting up hw */
if (ASIC_IS_DCE6(rdev))
dce6_audio_enable(rdev, dig->afmt->pin, 1);
dce6_audio_enable(rdev, dig->afmt->pin, true);
else
dce4_audio_enable(rdev, dig->afmt->pin, 0xf);
r600_audio_enable(rdev, dig->afmt->pin, true);
}
 
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
515,14 → 482,6
if (!enable && !dig->afmt->enabled)
return;
 
if (!enable && dig->afmt->pin) {
if (ASIC_IS_DCE6(rdev))
dce6_audio_enable(rdev, dig->afmt->pin, 0);
else
dce4_audio_enable(rdev, dig->afmt->pin, 0);
dig->afmt->pin = NULL;
}
 
dig->afmt->enabled = enable;
 
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
/drivers/video/drm/radeon/kv_dpm.c
2800,8 → 2800,6
tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
seq_printf(m, "power level %d sclk: %u vddc: %u\n",
current_index, sclk, vddc);
}
/drivers/video/drm/radeon/r600.c
122,97 → 122,9
 
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
int r;
 
/* bypass vclk and dclk with bclk */
WREG32_P(CG_UPLL_FUNC_CNTL_2,
VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
 
/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
 
if (rdev->family >= CHIP_RS780)
WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
~UPLL_BYPASS_CNTL);
 
if (!vclk || !dclk) {
/* keep the Bypass mode, put PLL to sleep */
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
return 0;
}
 
if (rdev->clock.spll.reference_freq == 10000)
ref_div = 34;
else
ref_div = 4;
 
r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
ref_div + 1, 0xFFF, 2, 30, ~0,
&fb_div, &vclk_div, &dclk_div);
if (r)
return r;
 
if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
fb_div >>= 1;
else
fb_div |= 1;
 
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
if (r)
return r;
 
/* assert PLL_RESET */
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
 
/* For RS780 we have to choose ref clk */
if (rdev->family >= CHIP_RS780)
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
~UPLL_REFCLK_SRC_SEL_MASK);
 
/* set the required fb, ref and post divider values */
WREG32_P(CG_UPLL_FUNC_CNTL,
UPLL_FB_DIV(fb_div) |
UPLL_REF_DIV(ref_div),
~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
WREG32_P(CG_UPLL_FUNC_CNTL_2,
UPLL_SW_HILEN(vclk_div >> 1) |
UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
UPLL_SW_HILEN2(dclk_div >> 1) |
UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
~UPLL_SW_MASK);
 
/* give the PLL some time to settle */
mdelay(15);
 
/* deassert PLL_RESET */
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
 
mdelay(15);
 
/* deassert BYPASS EN */
WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
 
if (rdev->family >= CHIP_RS780)
WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
 
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
if (r)
return r;
 
/* switch VCLK and DCLK selection */
WREG32_P(CG_UPLL_FUNC_CNTL_2,
VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
 
mdelay(100);
 
return 0;
}
 
void dce3_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
1080,8 → 992,6
WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
1132,8 → 1042,6
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
radeon_gart_table_vram_unpin(rdev);
}
 
1430,7 → 1338,7
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
0, NULL, NULL, &rdev->vram_scratch.robj);
0, NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
2884,13 → 2792,12
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
int r600_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv)
struct radeon_fence **fence)
{
struct radeon_fence *fence;
struct radeon_sync sync;
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.blit_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes, tmp;
2897,7 → 2804,11
int i, num_loops;
int r = 0;
 
radeon_sync_create(&sync);
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2904,12 → 2815,12
r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_sync_resv(rdev, &sync, resv, false);
radeon_sync_rings(rdev, &sync, ring->idx);
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2935,17 → 2846,17
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
 
r = radeon_fence_emit(rdev, &fence, ring->idx);
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_sync_free(rdev, &sync, fence);
radeon_semaphore_free(rdev, &sem, *fence);
 
return fence;
return r;
}
 
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
3260,7 → 3171,7
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0,
NULL, NULL, &rdev->ih.ring_obj);
NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
return r;
/drivers/video/drm/radeon/r600_dma.c
338,17 → 338,17
{
struct radeon_ib ib;
unsigned i;
unsigned index;
int r;
void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp = 0;
u64 gpu_addr;
 
if (ring->idx == R600_RING_TYPE_DMA_INDEX)
index = R600_WB_DMA_RING_TEST_OFFSET;
else
index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
if (!ptr) {
DRM_ERROR("invalid vram scratch pointer\n");
return -EINVAL;
}
 
gpu_addr = rdev->wb.gpu_addr + index;
tmp = 0xCAFEDEAD;
writel(tmp, ptr);
 
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
357,8 → 357,8
}
 
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
ib.ptr[1] = lower_32_bits(gpu_addr);
ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
ib.ptr[3] = 0xDEADBEEF;
ib.length_dw = 4;
 
374,7 → 374,7
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = le32_to_cpu(rdev->wb.wb[index/4]);
tmp = readl(ptr);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
430,19 → 430,18
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
* @resv: reservation object to sync to
* @fence: radeon fence object
*
* Copy GPU paging using the DMA engine (r6xx).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
int r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv)
struct radeon_fence **fence)
{
struct radeon_fence *fence;
struct radeon_sync sync;
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
449,7 → 448,11
int i, num_loops;
int r = 0;
 
radeon_sync_create(&sync);
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
456,12 → 459,12
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_sync_resv(rdev, &sync, resv, false);
radeon_sync_rings(rdev, &sync, ring->idx);
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
477,15 → 480,15
dst_offset += cur_size_in_dw * 4;
}
 
r = radeon_fence_emit(rdev, &fence, ring->idx);
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_sync_free(rdev, &sync, fence);
radeon_semaphore_free(rdev, &sem, *fence);
 
return fence;
return r;
}
/drivers/video/drm/radeon/radeon.h
60,13 → 60,12
* are considered as fatal)
*/
 
#include <linux/atomic.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <asm/div64.h>
#include <linux/fence.h>
 
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
74,11 → 73,11
//#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>
 
#include <drm/drm_gem.h>
 
#include <linux/irqreturn.h>
#include <linux/pci.h>
#include <pci.h>
 
#include <errno-base.h>
 
#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
155,6 → 154,9
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
 
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
 
/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX 0
181,6 → 183,9
/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS 4
 
/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS 4
 
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
379,7 → 384,6
* Fences.
*/
struct radeon_fence_driver {
struct radeon_device *rdev;
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
386,26 → 390,22
/* sync_seq is protected by ring emission lock */
uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
bool initialized, delayed_irq;
struct delayed_work lockup_work;
bool initialized;
};
 
struct radeon_fence {
struct fence base;
 
struct radeon_device *rdev;
struct kref kref;
/* protected by radeon_fence.lock */
uint64_t seq;
/* RB, DMA, etc. */
unsigned ring;
bool is_vm_update;
 
wait_queue_t fence_wake;
};
 
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
void radeon_fence_driver_force_completion(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
481,15 → 481,6
#endif
};
 
struct radeon_bo_list {
struct radeon_bo *robj;
struct ttm_validate_buffer tv;
uint64_t gpu_offset;
unsigned prefered_domains;
unsigned allowed_domains;
uint32_t tiling_flags;
};
 
/* bo virtual address in a specific vm */
struct radeon_bo_va {
/* protected by bo being reserved */
496,7 → 487,6
struct list_head bo_list;
uint32_t flags;
uint64_t addr;
struct radeon_fence *last_pt_update;
unsigned ref_count;
 
/* protected by vm mutex */
513,7 → 503,7
struct list_head list;
/* Protected by tbo.reserved */
u32 initial_domain;
struct ttm_place placements[4];
u32 placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
532,8 → 522,6
struct drm_gem_object gem_base;
 
pid_t pid;
 
struct radeon_mn *mn;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
616,6 → 604,7
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
};
 
int radeon_semaphore_create(struct radeon_device *rdev,
624,33 → 613,16
struct radeon_semaphore *semaphore);
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
struct radeon_fence *fence);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int waiting_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
 
/*
* Synchronization
*/
struct radeon_sync {
struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_fence *last_vm_update;
};
 
void radeon_sync_create(struct radeon_sync *sync);
void radeon_sync_fence(struct radeon_sync *sync,
struct radeon_fence *fence);
int radeon_sync_resv(struct radeon_device *rdev,
struct radeon_sync *sync,
struct reservation_object *resv,
bool shared);
int radeon_sync_rings(struct radeon_device *rdev,
struct radeon_sync *sync,
int waiting_ring);
void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
struct radeon_fence *fence);
 
/*
* GART structures, functions & helpers
*/
struct radeon_mc;
750,10 → 722,6
 
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
phys_addr_t *aperture_base,
size_t *aperture_size,
size_t *start_offset);
 
/*
* IRQS.
833,7 → 801,6
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
855,7 → 822,7
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
struct radeon_sync sync;
struct radeon_semaphore *semaphore;
};
 
struct radeon_ring {
932,23 → 899,10
uint64_t addr;
};
 
struct radeon_vm_id {
unsigned id;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct radeon_fence *flushed_updates;
/* last use of vmid */
struct radeon_fence *last_id_use;
};
 
struct radeon_vm {
struct mutex mutex;
 
struct rb_root va;
unsigned id;
 
/* protecting invalidated and freed */
spinlock_t status_lock;
 
/* BOs moved, but not yet updated in the PT */
struct list_head invalidated;
 
957,6 → 911,7
 
/* contains the page directory */
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
unsigned max_pde_used;
 
/* array of page tables, one for each page directory entry */
964,8 → 919,13
 
struct radeon_bo_va *ib_bo_va;
 
/* for id and flush management per ring */
struct radeon_vm_id ids[RADEON_NUM_RINGS];
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
/* last flush or NULL if we still need to flush */
struct radeon_fence *last_flush;
/* last use of vmid */
struct radeon_fence *last_id_use;
};
 
struct radeon_vm_manager {
1073,7 → 1033,19
/*
* CS.
*/
struct radeon_cs_reloc {
struct drm_gem_object *gobj;
struct radeon_bo *robj;
struct ttm_validate_buffer tv;
uint64_t gpu_offset;
unsigned prefered_domains;
unsigned allowed_domains;
uint32_t tiling_flags;
uint32_t handle;
};
 
struct radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
uint32_t *kdata;
void __user *user_ptr;
1091,15 → 1063,16
unsigned idx;
/* relocations */
unsigned nrelocs;
struct radeon_bo_list *relocs;
struct radeon_bo_list *vm_bos;
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct radeon_cs_reloc *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
struct radeon_cs_chunk *chunk_ib;
struct radeon_cs_chunk *chunk_relocs;
struct radeon_cs_chunk *chunk_flags;
struct radeon_cs_chunk *chunk_const_ib;
int chunk_ib_idx;
int chunk_relocs_idx;
int chunk_flags_idx;
int chunk_const_ib_idx;
struct radeon_ib ib;
struct radeon_ib const_ib;
void *track;
1113,7 → 1086,7
 
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
struct radeon_cs_chunk *ibc = p->chunk_ib;
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
 
if (ibc->kdata)
return ibc->kdata[idx];
1525,10 → 1498,6
u8 t_hyst;
u32 cycle_delay;
u16 t_max;
u8 control_mode;
u16 default_max_fan_pwm;
u16 default_fan_output_sensitivity;
u16 fan_output_sensitivity;
bool ucode_fan_control;
};
 
1662,11 → 1631,6
/* internal thermal controller on rv6xx+ */
enum radeon_int_thermal_type int_thermal_type;
struct device *int_hwmon_dev;
/* fan control parameters */
bool no_fan;
u8 fan_pulses_per_revolution;
u8 fan_min_rpm;
u8 fan_max_rpm;
/* dpm */
bool dpm_enabled;
struct radeon_dpm dpm;
1701,8 → 1665,7
uint32_t handle, struct radeon_fence **fence);
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence);
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
uint32_t allowed_domains);
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
void radeon_uvd_free_handles(struct radeon_device *rdev,
struct drm_file *filp);
int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
1791,11 → 1754,6
struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);
 
/*
* MMU Notifier
*/
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
void radeon_mn_unregister(struct radeon_bo *bo);
 
/*
* Debugfs
1829,8 → 1787,7
void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
/* testing functions */
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1911,24 → 1868,24
} display;
/* copy functions for bo handling */
struct {
struct radeon_fence *(*blit)(struct radeon_device *rdev,
int (*blit)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence **fence);
u32 blit_ring_index;
struct radeon_fence *(*dma)(struct radeon_device *rdev,
int (*dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence **fence);
u32 dma_ring_index;
/* method used for bo copy */
struct radeon_fence *(*copy)(struct radeon_device *rdev,
int (*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence **fence);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
2334,7 → 2291,6
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
unsigned fence_context;
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
2353,7 → 2309,7
bool need_dma32;
bool accel_working;
bool fastfb_working; /* IGP feature*/
bool needs_reset, in_reset;
bool needs_reset;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
2374,6 → 2330,7
struct radeon_mec mec;
struct work_struct hotplug_work;
struct work_struct audio_work;
struct work_struct reset_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
bool has_uvd;
2398,8 → 2355,6
struct radeon_atcs atcs;
/* srbm instance registers */
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrents access to GRBM index */
struct mutex grbm_idx_mutex;
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
2411,7 → 2366,6
/* tracking pinned memory */
u64 vram_pin_size;
u64 gart_pin_size;
struct mutex mn_lock;
};
 
bool radeon_is_px(struct drm_device *dev);
2467,18 → 2421,8
/*
* Cast helper
*/
extern const struct fence_ops radeon_fence_ops;
#define to_radeon_fence(p) ((struct radeon_fence *)(p))
 
static inline struct radeon_fence *to_radeon_fence(struct fence *f)
{
struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
 
if (__f->base.ops == &radeon_fence_ops)
return __f;
 
return NULL;
}
 
/*
* Registers read & write functions.
*/
2797,25 → 2741,18
/*
* RING helpers.
*/
 
/**
* radeon_ring_write - write a value to the ring
*
* @ring: radeon_ring structure holding ring information
* @v: dword (dw) value to write
*
* Write a value to the requested ring buffer (all asics).
*/
#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
if (ring->count_dw <= 0)
DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
 
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
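radeon_ring_write() always runs inside a lock/commit bracket; the count_dw bookkeeping above is what the DRM_ERROR in the r5271 body guards against. A typical emission sequence, following the pattern used throughout this diff (here a single three-dword SRBM write as in cik_dma_vm_flush()):

	r = radeon_ring_lock(rdev, ring, 3);	/* reserve three dwords */
	if (r)
		return r;
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
	radeon_ring_write(ring, VMID(0));
	radeon_ring_unlock_commit(rdev, ring, false);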
 
/*
* ASICs macro.
2841,7 → 2778,7
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
2854,9 → 2791,9
#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
2930,10 → 2867,6
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2950,7 → 2883,7
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
2957,7 → 2890,7
struct radeon_vm *vm, int ring);
void radeon_vm_flush(struct radeon_device *rdev,
struct radeon_vm *vm,
int ring, struct radeon_fence *fence);
int ring);
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
2991,10 → 2924,10
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
void r600_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
u8 enable_mask);
bool enable);
void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
u8 enable_mask);
bool enable);
 
/*
* R600 vram scratch functions
3064,7 → 2997,7
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt);
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_bo_list **cs_reloc,
struct radeon_cs_reloc **cs_reloc,
int nomm);
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
3072,7 → 3005,7
 
#include "radeon_object.h"
 
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
#define DRM_UDELAY(d) udelay(d)
 
resource_size_t
drm_get_resource_start(struct drm_device *dev, unsigned int resource);
/drivers/video/drm/radeon/radeon_device.c
82,7 → 82,7
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct drm_device *dev, videomode_t *usermode);
 
int get_modes(videomode_t *mode, u32 *count);
int get_modes(videomode_t *mode, u32_t *count);
int set_user_mode(videomode_t *mode);
int r100_2D_test(struct radeon_device *rdev);
 
437,37 → 437,6
__clear_bit(doorbell, rdev->doorbell.used);
}
 
/**
* radeon_doorbell_get_kfd_info - Report doorbell configuration required to
* setup KFD
*
* @rdev: radeon_device pointer
* @aperture_base: output returning doorbell aperture base physical address
* @aperture_size: output returning doorbell aperture size in bytes
* @start_offset: output returning # of doorbell bytes reserved for radeon.
*
* Radeon and the KFD share the doorbell aperture. Radeon sets it up,
* takes doorbells required for its own rings and reports the setup to KFD.
* Radeon reserved doorbells are at the start of the doorbell aperture.
*/
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
phys_addr_t *aperture_base,
size_t *aperture_size,
size_t *start_offset)
{
/* The first num_doorbells are used by radeon.
* KFD takes whatever's left in the aperture. */
if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
*aperture_base = rdev->doorbell.base;
*aperture_size = rdev->doorbell.size;
*start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
} else {
*aperture_base = 0;
*aperture_size = 0;
*start_offset = 0;
}
}
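A short usage sketch for the KFD side (hypothetical local names; the real consumer lived in radeon_kfd.c, which is removed elsewhere in this diff):

	phys_addr_t db_base;
	size_t db_size, db_start;

	radeon_doorbell_get_kfd_info(rdev, &db_base, &db_size, &db_start);
	/* KFD then maps [db_base + db_start, db_base + db_size) for its queues */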
 
/*
* radeon_wb_*()
* Writeback is the method by which the GPU updates special pages
525,7 → 494,7
 
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
1029,7 → 998,6
}
 
mutex_init(&rdev->mode_info.atom_context->mutex);
mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
1266,7 → 1234,6
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
 
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1281,13 → 1248,9
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
mutex_init(&rdev->grbm_idx_mutex);
 
// init_rwsem(&rdev->pm.mclk_lock);
// init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
mutex_init(&rdev->mn_lock);
// hash_init(rdev->mn_hash);
r = radeon_gem_init(rdev);
if (r)
return r;
1399,6 → 1362,9
if (r)
return r;
 
r = radeon_ib_ring_tests(rdev);
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
 
 
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1413,10 → 1379,6
return r;
}
 
// r = radeon_ib_ring_tests(rdev);
// if (r)
// DRM_ERROR("ib ring test failed (%d).\n", r);
 
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
1474,6 → 1436,7
}
}
 
retry:
r = radeon_asic_reset(rdev);
if (!r) {
dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1482,12 → 1445,25
 
radeon_restore_bios_scratch_regs(rdev);
 
if (!r) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!r && ring_data[i]) {
radeon_ring_restore(rdev, &rdev->ring[i],
ring_sizes[i], ring_data[i]);
ring_sizes[i] = 0;
ring_data[i] = NULL;
}
 
// r = radeon_ib_ring_tests(rdev);
// if (r) {
// dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
// if (saved) {
// saved = false;
// radeon_suspend(rdev);
// goto retry;
// }
// }
} else {
radeon_fence_driver_force_completion(rdev, i);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
kfree(ring_data[i]);
}
}
/drivers/video/drm/radeon/radeon_encoders.c
179,9 → 179,6
(rdev->pdev->subsystem_vendor == 0x1734) &&
(rdev->pdev->subsystem_device == 0x1107))
use_bl = false;
/* disable native backlight control on older asics */
else if (rdev->family < CHIP_R600)
use_bl = false;
else
use_bl = true;
}
413,24 → 410,3
}
}
 
bool radeon_encoder_is_digital(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
return true;
default:
return false;
}
}
/drivers/video/drm/radeon/si.c
2384,9 → 2384,6
u32 num_heads = 0, lb_size;
int i;
 
if (!rdev->mode_info.mode_config_initialized)
return;
 
radeon_update_display_priority(rdev);
 
for (i = 0; i < rdev->num_crtc; i++) {
3365,7 → 3362,6
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 header;
 
if (ib->is_const_ib) {
3401,13 → 3397,14
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
radeon_ring_write(ring, ib->length_dw |
(ib->vm ? (ib->vm->id << 24) : 0));
 
if (!ib->is_const_ib) {
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, vm_id);
radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
PACKET3_TC_ACTION_ENA |
4687,7 → 4684,7
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
int ret = 0;
u32 idx = 0, i;
u32 idx = 0;
struct radeon_cs_packet pkt;
 
do {
4698,12 → 4695,6
switch (pkt.type) {
case RADEON_PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
for (i = 0; i < ib->length_dw; i++) {
if (i == idx)
printk("\t0x%08x <---\n", ib->ptr[i]);
else
printk("\t0x%08x\n", ib->ptr[i]);
}
ret = -EINVAL;
break;
case RADEON_PACKET_TYPE2:
5023,23 → 5014,27
block, mc_id);
}
 
void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
if (vm == NULL)
return;
 
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
 
if (vm_id < 8) {
if (vm->id < 8) {
radeon_ring_write(ring,
(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
} else {
radeon_ring_write(ring,
(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
}
radeon_ring_write(ring, 0);
radeon_ring_write(ring, pd_addr >> 12);
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5055,7 → 5050,7
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
radeon_ring_write(ring, 1 << vm->id);
 
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
/drivers/video/drm/radeon/si_dpm.c
23,7 → 23,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "sid.h"
#include "r600_dpm.h"
#include "si_dpm.h"
3398,15 → 3397,6
 
ret = si_read_smc_sram_dword(rdev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
&tmp, si_pi->sram_end);
if (ret)
return ret;
 
si_pi->fan_table_start = tmp;
 
ret = si_read_smc_sram_dword(rdev,
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
&tmp, si_pi->sram_end);
if (ret)
5826,32 → 5816,7
si_enable_acpi_power_management(rdev);
}
 
static int si_thermal_enable_alert(struct radeon_device *rdev,
bool enable)
{
u32 thermal_int = RREG32(CG_THERMAL_INT);
 
if (enable) {
PPSMC_Result result;
 
thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
WREG32(CG_THERMAL_INT, thermal_int);
rdev->irq.dpm_thermal = false;
result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
if (result != PPSMC_Result_OK) {
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
return -EINVAL;
}
} else {
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
WREG32(CG_THERMAL_INT, thermal_int);
rdev->irq.dpm_thermal = true;
}
 
return 0;
}
 
static int si_thermal_set_temperature_range(struct radeon_device *rdev,
static int si_set_thermal_temperature_range(struct radeon_device *rdev,
int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
5876,309 → 5841,6
return 0;
}
 
static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
struct si_power_info *si_pi = si_get_pi(rdev);
u32 tmp;
 
if (si_pi->fan_ctrl_is_in_default_mode) {
tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
si_pi->fan_ctrl_default_mode = tmp;
tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
si_pi->t_min = tmp;
si_pi->fan_ctrl_is_in_default_mode = false;
}
 
tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
tmp |= TMIN(0);
WREG32(CG_FDO_CTRL2, tmp);
 
tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
tmp |= FDO_PWM_MODE(mode);
WREG32(CG_FDO_CTRL2, tmp);
}
 
static int si_thermal_setup_fan_table(struct radeon_device *rdev)
{
struct si_power_info *si_pi = si_get_pi(rdev);
PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
u32 duty100;
u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
u16 fdo_min, slope1, slope2;
u32 reference_clock, tmp;
int ret;
u64 tmp64;
 
if (!si_pi->fan_table_start) {
rdev->pm.dpm.fan.ucode_fan_control = false;
return 0;
}
 
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
 
if (duty100 == 0) {
rdev->pm.dpm.fan.ucode_fan_control = false;
return 0;
}
 
tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
do_div(tmp64, 10000);
fdo_min = (u16)tmp64;
 
t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
 
pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
 
slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
 
fan_table.slope1 = cpu_to_be16(slope1);
fan_table.slope2 = cpu_to_be16(slope2);
 
fan_table.fdo_min = cpu_to_be16(fdo_min);
 
fan_table.hys_down = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
 
fan_table.hys_up = cpu_to_be16(1);
 
fan_table.hys_slope = cpu_to_be16(1);
 
fan_table.temp_resp_lim = cpu_to_be16(5);
 
reference_clock = radeon_get_xclk(rdev);
 
fan_table.refresh_period = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
reference_clock) / 1600);
 
fan_table.fdo_max = cpu_to_be16((u16)duty100);
 
tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
fan_table.temp_src = (uint8_t)tmp;
 
ret = si_copy_bytes_to_smc(rdev,
si_pi->fan_table_start,
(u8 *)(&fan_table),
sizeof(fan_table),
si_pi->sram_end);
 
if (ret) {
DRM_ERROR("Failed to load fan table to the SMC.");
rdev->pm.dpm.fan.ucode_fan_control = false;
}
 
return 0;
}
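
A worked example of the slope computation above, using hypothetical numbers rather than any real fan profile:

/* duty100 = 255, pwm_diff1 = 3000, t_diff1 = 2000:
* slope1 = (50 + (16 * 255 * 3000) / 2000) / 100
*        = (50 + 6120) / 100
*        = 61
* The "+ 50" rounds the final division by 100 to the nearest
* integer instead of truncating.
*/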
 
static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
PPSMC_Result ret;
 
ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl);
if (ret == PPSMC_Result_OK)
return 0;
else
return -EINVAL;
}
 
static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
PPSMC_Result ret;
 
ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl);
if (ret == PPSMC_Result_OK)
return 0;
else
return -EINVAL;
}
 
#if 0
static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
 
if (rdev->pm.no_fan)
return -ENOENT;
 
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
 
if (duty100 == 0)
return -EINVAL;
 
tmp64 = (u64)duty * 100;
do_div(tmp64, duty100);
*speed = (u32)tmp64;
 
if (*speed > 100)
*speed = 100;
 
return 0;
}
 
static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
u32 speed)
{
u32 tmp;
u32 duty, duty100;
u64 tmp64;
 
if (rdev->pm.no_fan)
return -ENOENT;
 
if (speed > 100)
return -EINVAL;
 
if (rdev->pm.dpm.fan.ucode_fan_control)
si_fan_ctrl_stop_smc_fan_control(rdev);
 
duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
 
if (duty100 == 0)
return -EINVAL;
 
tmp64 = (u64)speed * duty100;
do_div(tmp64, 100);
duty = (u32)tmp64;
 
tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
tmp |= FDO_STATIC_DUTY(duty);
WREG32(CG_FDO_CTRL0, tmp);
 
si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
 
return 0;
}
 
static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
u32 *speed)
{
u32 tach_period;
u32 xclk = radeon_get_xclk(rdev);
 
if (rdev->pm.no_fan)
return -ENOENT;
 
if (rdev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
 
tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
if (tach_period == 0)
return -ENOENT;
 
*speed = 60 * xclk * 10000 / tach_period;
 
return 0;
}
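
A worked example of the RPM conversion, assuming the usual 27 MHz reference clock (radeon_get_xclk() reports it in 10 kHz units, so xclk = 2700; the tach_period value is made up):

/* xclk = 2700, tach_period = 540000:
* speed = 60 * 2700 * 10000 / 540000 = 3000 RPM
* si_fan_ctrl_set_fan_speed_rpm() below uses the inverse formula,
* dividing by (8 * speed); the extra factor of 8 presumably reflects
* how the hardware counts tach edges per revolution.
*/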
 
static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
u32 speed)
{
u32 tach_period, tmp;
u32 xclk = radeon_get_xclk(rdev);
 
if (rdev->pm.no_fan)
return -ENOENT;
 
if (rdev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
 
if ((speed < rdev->pm.fan_min_rpm) ||
(speed > rdev->pm.fan_max_rpm))
return -EINVAL;
 
if (rdev->pm.dpm.fan.ucode_fan_control)
si_fan_ctrl_stop_smc_fan_control(rdev);
 
tach_period = 60 * xclk * 10000 / (8 * speed);
tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
tmp |= TARGET_PERIOD(tach_period);
WREG32(CG_TACH_CTRL, tmp);
 
si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
 
return 0;
}
#endif
 
static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
{
struct si_power_info *si_pi = si_get_pi(rdev);
u32 tmp;
 
if (!si_pi->fan_ctrl_is_in_default_mode) {
tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
WREG32(CG_FDO_CTRL2, tmp);
 
tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
tmp |= TMIN(si_pi->t_min);
WREG32(CG_FDO_CTRL2, tmp);
si_pi->fan_ctrl_is_in_default_mode = true;
}
}
 
static void si_thermal_start_smc_fan_control(struct radeon_device *rdev)
{
if (rdev->pm.dpm.fan.ucode_fan_control) {
si_fan_ctrl_start_smc_fan_control(rdev);
si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
}
}
 
static void si_thermal_initialize(struct radeon_device *rdev)
{
u32 tmp;
 
if (rdev->pm.fan_pulses_per_revolution) {
tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
WREG32(CG_TACH_CTRL, tmp);
}
 
tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
tmp |= TACH_PWM_RESP_RATE(0x28);
WREG32(CG_FDO_CTRL2, tmp);
}
 
static int si_thermal_start_thermal_controller(struct radeon_device *rdev)
{
int ret;
 
si_thermal_initialize(rdev);
ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = si_thermal_enable_alert(rdev, true);
if (ret)
return ret;
if (rdev->pm.dpm.fan.ucode_fan_control) {
ret = si_halt_smc(rdev);
if (ret)
return ret;
ret = si_thermal_setup_fan_table(rdev);
if (ret)
return ret;
ret = si_resume_smc(rdev);
if (ret)
return ret;
si_thermal_start_smc_fan_control(rdev);
}
 
return 0;
}
 
static void si_thermal_stop_thermal_controller(struct radeon_device *rdev)
{
if (!rdev->pm.no_fan) {
si_fan_ctrl_set_default_mode(rdev);
si_fan_ctrl_stop_smc_fan_control(rdev);
}
}
 
int si_dpm_enable(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
6291,39 → 5953,31
 
si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
si_thermal_start_thermal_controller(rdev);
 
ni_update_current_ps(rdev, boot_ps);
 
return 0;
}
 
static int si_set_temperature_range(struct radeon_device *rdev)
int si_dpm_late_enable(struct radeon_device *rdev)
{
int ret;
 
ret = si_thermal_enable_alert(rdev, false);
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
PPSMC_Result result;
 
ret = si_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = si_thermal_enable_alert(rdev, true);
if (ret)
return ret;
rdev->irq.dpm_thermal = true;
radeon_irq_set(rdev);
result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
 
return ret;
if (result != PPSMC_Result_OK)
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
}
 
int si_dpm_late_enable(struct radeon_device *rdev)
{
int ret;
 
ret = si_set_temperature_range(rdev);
if (ret)
return ret;
 
return ret;
return 0;
}
 
void si_dpm_disable(struct radeon_device *rdev)
6333,7 → 5987,6
 
if (!si_is_smc_running(rdev))
return;
si_thermal_stop_thermal_controller(rdev);
si_disable_ulv(rdev);
si_clear_vc(rdev);
if (pi->thermal_protection)
6872,9 → 6525,6
rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
 
si_pi->fan_ctrl_is_in_default_mode = true;
rdev->pm.dpm.fan.ucode_fan_control = false;
 
return 0;
}
 
/drivers/video/drm/radeon/ni_dma.c
123,7 → 123,6
struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
 
if (rdev->wb.enabled) {
u32 next_rptr = ring->wptr + 4;
141,7 → 140,7
*/
while ((ring->wptr & 7) != 5)
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 
447,12 → 446,16
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
 
void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
if (vm == NULL)
return;
 
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
radeon_ring_write(ring, pd_addr >> 12);
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* flush hdp cache */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
462,6 → 465,6
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
radeon_ring_write(ring, 1 << vm->id);
}
 
/drivers/video/drm/radeon/r100.c
869,14 → 869,13
return false;
}
 
struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv)
struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_fence *fence;
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
897,7 → 896,7
r = radeon_ring_lock(rdev, ring, ndw);
if (r) {
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
return ERR_PTR(-EINVAL);
return -EINVAL;
}
while (num_gpu_pages > 0) {
cur_pages = num_gpu_pages;
937,13 → 936,11
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return ERR_PTR(r);
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring, false);
return fence;
return r;
}
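
The two revisions differ in calling convention here; a sketch with hypothetical arguments (src, dst, npages, resv):

/* rev 5271 style: the fence is the return value, errors via ERR_PTR() */
struct radeon_fence *fence = r100_copy_blit(rdev, src, dst, npages, resv);
if (IS_ERR(fence))
return PTR_ERR(fence);

/* rev 5270 style: an int status, the fence filled in by reference */
struct radeon_fence *fence = NULL;
int r = r100_copy_blit(rdev, src, dst, npages, &fence);
if (r)
return r;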
 
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
1250,7 → 1247,7
int r;
u32 tile_flags = 0;
u32 tmp;
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
u32 value;
 
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1289,7 → 1286,7
int idx)
{
unsigned c, i;
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
int r = 0;
volatile uint32_t *ib;
1538,7 → 1535,7
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
1897,7 → 1894,7
static int r100_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
unsigned idx;
volatile uint32_t *ib;
2057,7 → 2054,7
}
if (r)
return r;
} while (p->idx < p->chunk_ib->length_dw);
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
return 0;
}
 
3203,9 → 3200,6
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
 
if (!rdev->mode_info.mode_config_initialized)
return;
 
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled) {
/drivers/video/drm/radeon/r600d.h
323,12 → 323,11
#define HDP_TILING_CONFIG 0x2F3C
#define HDP_DEBUG1 0x2F34
 
#define MC_CONFIG 0x2000
#define MC_VM_AGP_TOP 0x2184
#define MC_VM_AGP_BOT 0x2188
#define MC_VM_AGP_BASE 0x218C
#define MC_VM_FB_LOCATION 0x2180
#define MC_VM_L1_TLB_MCB_RD_UVD_CNTL 0x2124
#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
#define ENABLE_L1_TLB (1 << 0)
#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
#define ENABLE_L1_STRICT_ORDERING (1 << 2)
348,7 → 347,6
#define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15)
#define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000
#define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15
#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
#define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0
#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC
#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204
355,7 → 353,6
#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208
#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C
#define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200
#define MC_VM_L1_TLB_MCB_WR_UVD_CNTL 0x212c
#define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4
#define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8
#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210
369,8 → 366,6
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
 
#define RS_DQ_RD_RET_CONF 0x2348
 
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
927,23 → 922,6
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
 
/* Audio */
#define AZ_HOT_PLUG_CONTROL 0x7300
# define AZ_FORCE_CODEC_WAKE (1 << 0)
# define JACK_DETECTION_ENABLE (1 << 4)
# define UNSOLICITED_RESPONSE_ENABLE (1 << 8)
# define CODEC_HOT_PLUG_ENABLE (1 << 12)
# define AUDIO_ENABLED (1 << 31)
/* DCE3 adds */
# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
# define PIN0_AUDIO_ENABLED (1 << 24)
# define PIN1_AUDIO_ENABLED (1 << 25)
# define PIN2_AUDIO_ENABLED (1 << 26)
# define PIN3_AUDIO_ENABLED (1 << 27)
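
For illustration, a hypothetical read-modify-write using these bits (not code from the driver):

u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
tmp |= UNSOLICITED_RESPONSE_ENABLE | PIN0_JACK_DETECTION_ENABLE;
WREG32(AZ_HOT_PLUG_CONTROL, tmp);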
 
/* Audio clocks DCE 2.0/3.0 */
#define AUDIO_DTO 0x7340
# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0)
1498,7 → 1476,6
#define UVD_CGC_GATE 0xf4a8
#define UVD_LMI_CTRL2 0xf4f4
#define UVD_MASTINT_EN 0xf500
#define UVD_FW_START 0xf51C
#define UVD_LMI_ADDR_EXT 0xf594
#define UVD_LMI_CTRL 0xf598
#define UVD_LMI_SWAP_CNTL 0xf5b4
1511,13 → 1488,6
#define UVD_MPC_SET_MUX 0xf5f4
#define UVD_MPC_SET_ALU 0xf5f8
 
#define UVD_VCPU_CACHE_OFFSET0 0xf608
#define UVD_VCPU_CACHE_SIZE0 0xf60c
#define UVD_VCPU_CACHE_OFFSET1 0xf610
#define UVD_VCPU_CACHE_SIZE1 0xf614
#define UVD_VCPU_CACHE_OFFSET2 0xf618
#define UVD_VCPU_CACHE_SIZE2 0xf61c
 
#define UVD_VCPU_CNTL 0xf660
#define UVD_SOFT_RESET 0xf680
#define RBC_SOFT_RESET (1<<0)
1547,35 → 1517,9
 
#define UVD_CONTEXT_ID 0xf6f4
 
/* rs780 only */
#define GFX_MACRO_BYPASS_CNTL 0x30c0
#define SPLL_BYPASS_CNTL (1 << 0)
#define UPLL_BYPASS_CNTL (1 << 1)
 
#define CG_UPLL_FUNC_CNTL 0x7e0
# define UPLL_RESET_MASK 0x00000001
# define UPLL_SLEEP_MASK 0x00000002
# define UPLL_BYPASS_EN_MASK 0x00000004
# define UPLL_CTLREQ_MASK 0x00000008
# define UPLL_FB_DIV(x) ((x) << 4)
# define UPLL_FB_DIV_MASK 0x0000FFF0
# define UPLL_REF_DIV(x) ((x) << 16)
# define UPLL_REF_DIV_MASK 0x003F0000
# define UPLL_REFCLK_SRC_SEL_MASK 0x20000000
# define UPLL_CTLACK_MASK 0x40000000
# define UPLL_CTLACK2_MASK 0x80000000
#define CG_UPLL_FUNC_CNTL_2 0x7e4
# define UPLL_SW_HILEN(x) ((x) << 0)
# define UPLL_SW_LOLEN(x) ((x) << 4)
# define UPLL_SW_HILEN2(x) ((x) << 8)
# define UPLL_SW_LOLEN2(x) ((x) << 12)
# define UPLL_DIVEN_MASK 0x00010000
# define UPLL_DIVEN2_MASK 0x00020000
# define UPLL_SW_MASK 0x0003FFFF
# define VCLK_SRC_SEL(x) ((x) << 20)
# define VCLK_SRC_SEL_MASK 0x01F00000
# define DCLK_SRC_SEL(x) ((x) << 25)
# define DCLK_SRC_SEL_MASK 0x3E000000
 
/*
* PM4
/drivers/video/drm/radeon/radeon_asic.c
2294,14 → 2294,6
case CHIP_RS780:
case CHIP_RS880:
rdev->asic = &rs780_asic;
/* 760G/780V/880V don't have UVD */
if ((rdev->pdev->device == 0x9616)||
(rdev->pdev->device == 0x9611)||
(rdev->pdev->device == 0x9613)||
(rdev->pdev->device == 0x9711)||
(rdev->pdev->device == 0x9713))
rdev->has_uvd = false;
else
rdev->has_uvd = true;
break;
case CHIP_RV770:
/drivers/video/drm/radeon/radeon_asic.h
81,11 → 81,11
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence **fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
152,11 → 152,11
/*
* r200,rv250,rs300,rv280
*/
struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence **fence);
void r200_set_safe_registers(struct radeon_device *rdev);
 
/*
340,14 → 340,12
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
int r600_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages, struct radeon_fence **fence);
int r600_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
391,6 → 389,7
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
462,10 → 461,10
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
int rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence **fence);
u32 rv770_get_xclk(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
536,10 → 535,10
struct radeon_fence *fence);
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
int evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence **fence);
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
int evergreen_get_temp(struct radeon_device *rdev);
599,8 → 598,7
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
625,8 → 623,7
uint32_t incr, uint32_t flags);
void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
 
void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr);
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
701,13 → 698,12
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence **fence);
 
void si_dma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
724,8 → 720,7
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
 
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr);
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
764,14 → 759,14
struct radeon_semaphore *semaphore,
bool emit_wait);
void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
int cik_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
struct radeon_fence **fence);
int cik_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv);
struct radeon_fence **fence);
int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
797,8 → 792,7
int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
816,8 → 810,7
uint32_t incr, uint32_t flags);
void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
 
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr);
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,
struct radeon_ring *ring);
889,7 → 882,6
struct radeon_ring *ring);
void uvd_v1_0_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
int uvd_v1_0_resume(struct radeon_device *rdev);
 
int uvd_v1_0_init(struct radeon_device *rdev);
void uvd_v1_0_fini(struct radeon_device *rdev);
897,8 → 889,6
void uvd_v1_0_stop(struct radeon_device *rdev);
 
int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
void uvd_v1_0_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
/drivers/video/drm/radeon/atombios_dp.c
100,7 → 100,6
memset(&args, 0, sizeof(args));
 
mutex_lock(&chan->mutex);
mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
 
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
 
114,7 → 113,7
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd;
 
atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
*ack = args.v1.ucReplyStatus;
 
148,7 → 147,6
 
r = recv_bytes;
done:
mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
mutex_unlock(&chan->mutex);
 
return r;
234,8 → 232,8
 
/***** general DP utility functions *****/
 
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
 
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count,
/drivers/video/drm/radeon/main.c
29,7 → 29,7
 
videomode_t usermode;
 
void cpu_detect1();
void cpu_detect();
 
int _stdcall display_handler(ioctl_t *io);
static char log[256];
117,7 → 117,7
asm volatile ("int $0x40"::"a"(-1));
}
 
u32 __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
struct radeon_device *rdev = NULL;
 
134,7 → 134,7
if( GetService("DISPLAY") != 0 )
return 0;
 
printf("Radeon v3.19-rc1 cmdline %s\n", cmdline);
printf("Radeon v3.17-rc5 cmdline %s\n", cmdline);
 
if( cmdline && *cmdline )
parse_cmdline(cmdline, &usermode, log, &radeon_modeset);
145,7 → 145,7
return 0;
}
 
cpu_detect1();
cpu_detect();
 
err = enum_pci_devices();
if( unlikely(err != 0) )
217,8 → 217,8
int _stdcall display_handler(ioctl_t *io)
{
int retval = -1;
u32 *inp;
u32 *outp;
u32_t *inp;
u32_t *outp;
 
inp = io->input;
outp = io->output;
273,10 → 273,10
#define PCI_CLASS_REVISION 0x08
#define PCI_CLASS_DISPLAY_VGA 0x0300
 
int pci_scan_filter(u32 id, u32 busnr, u32 devfn)
int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
{
u16 vendor, device;
u32 class;
u16_t vendor, device;
u32_t class;
int ret = 0;
 
vendor = id & 0xffff;
/drivers/video/drm/radeon/radeon_atombios.c
196,7 → 196,7
}
}
 
struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
221,7 → 221,6
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
gpio.shift = pin->ucGpioPinBitShift;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
459,7 → 458,7
return true;
}
 
static const int supported_devices_connector_convert[] = {
const int supported_devices_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_VGA,
DRM_MODE_CONNECTOR_DVII,
478,7 → 477,7
DRM_MODE_CONNECTOR_DisplayPort
};
 
static const uint16_t supported_devices_connector_object_id_convert[] = {
const uint16_t supported_devices_connector_object_id_convert[] = {
CONNECTOR_OBJECT_ID_NONE,
CONNECTOR_OBJECT_ID_VGA,
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
495,7 → 494,7
CONNECTOR_OBJECT_ID_SVIDEO
};
 
static const int object_connector_convert[] = {
const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVII,
DRM_MODE_CONNECTOR_DVII,
802,7 → 801,7
hpd_record =
(ATOM_HPD_INT_RECORD *)
record;
gpio = radeon_atombios_lookup_gpio(rdev,
gpio = radeon_lookup_gpio(rdev,
hpd_record->ucHPDIntGPIOID);
hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
hpd.plugged_state = hpd_record->ucPlugged_PinState;
2129,7 → 2128,7
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
radeon_atombios_lookup_gpio(rdev,
radeon_lookup_gpio(rdev,
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2165,7 → 2164,7
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
radeon_atombios_lookup_gpio(rdev,
radeon_lookup_gpio(rdev,
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2201,7 → 2200,7
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
radeon_atombios_lookup_gpio(rdev,
radeon_lookup_gpio(rdev,
power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2249,14 → 2248,6
 
/* add the i2c bus for thermal/fan chip */
if (controller->ucType > 0) {
if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
rdev->pm.no_fan = true;
rdev->pm.fan_pulses_per_revolution =
controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
if (rdev->pm.fan_pulses_per_revolution) {
rdev->pm.fan_min_rpm = controller->ucFanMinRPM;
rdev->pm.fan_max_rpm = controller->ucFanMaxRPM;
}
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
/drivers/video/drm/radeon/radeon_semaphore.c
34,14 → 34,15
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
int r;
uint64_t *cpu_addr;
int i, r;
 
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
return -ENOMEM;
}
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
&(*semaphore)->sa_bo, 8, 8);
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
8 * RADEON_NUM_SYNCS, 8);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
50,8 → 51,13
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
 
*((uint64_t *)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
for (i = 0; i < RADEON_NUM_SYNCS; ++i)
cpu_addr[i] = 0;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i)
(*semaphore)->sync_to[i] = NULL;
 
return 0;
}
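
A note on the allocation above, as a sketch:

/* The sa_bo now holds RADEON_NUM_SYNCS separate 64-bit semaphore
* words. radeon_semaphore_sync_rings() below advances gpu_addr by 8
* after each emitted sync, so every signal/wait pair gets its own
* word instead of sharing a single one.
*/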
 
89,6 → 95,99
return false;
}
 
/**
* radeon_semaphore_sync_to - use the semaphore to sync to a fence
*
* @semaphore: semaphore object to add fence to
* @fence: fence to sync to
*
* Sync to the fence using this semaphore object
*/
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
struct radeon_fence *fence)
{
struct radeon_fence *other;
 
if (!fence)
return;
 
other = semaphore->sync_to[fence->ring];
semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
}
 
/**
* radeon_semaphore_sync_rings - sync ring to all registered fences
*
* @rdev: radeon_device pointer
* @semaphore: semaphore object to use for sync
* @ring: ring that needs sync
*
* Ensure that all registered fences are signaled before letting
* the ring continue. The caller must hold the ring lock.
*/
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int ring)
{
unsigned count = 0;
int i, r;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_fence *fence = semaphore->sync_to[i];
 
/* check if we really need to sync */
if (!radeon_fence_need_sync(fence, ring))
continue;
 
/* prevent GPU deadlocks */
if (!rdev->ring[i].ready) {
dev_err(rdev->dev, "Syncing to a disabled ring!");
return -EINVAL;
}
 
if (++count > RADEON_NUM_SYNCS) {
/* not enough room, wait manually */
r = radeon_fence_wait(fence, false);
if (r)
return r;
continue;
}
 
/* allocate enough space for sync command */
r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
if (r) {
return r;
}
 
/* emit the signal semaphore */
if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
/* signaling wasn't successful, wait manually */
radeon_ring_undo(&rdev->ring[i]);
r = radeon_fence_wait(fence, false);
if (r)
return r;
continue;
}
 
/* we assume the caller has already allocated space on the waiter's ring */
if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
/* waiting wasn't successful, wait manually */
radeon_ring_undo(&rdev->ring[i]);
r = radeon_fence_wait(fence, false);
if (r)
return r;
continue;
}
 
radeon_ring_commit(rdev, &rdev->ring[i], false);
radeon_fence_note_sync(fence, ring);
 
semaphore->gpu_addr += 8;
}
 
return 0;
}
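
A minimal usage sketch of the multi-ring sync API added here (other_fence, ring and fence are hypothetical; error handling abbreviated):

struct radeon_semaphore *sem = NULL;
int r;

r = radeon_semaphore_create(rdev, &sem);
if (r)
return r;
radeon_semaphore_sync_to(sem, other_fence); /* register the dependency */
r = radeon_semaphore_sync_rings(rdev, sem, ring); /* emit waits/signals */
/* ... emit and commit commands on the ring ... */
radeon_semaphore_free(rdev, &sem, fence);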
 
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence)
/drivers/video/drm/radeon/Makefile
1,10 → 1,11
 
 
CC = gcc
LD = ld
AS = as
FASM = fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
DRV_TOPDIR = $(CURDIR)/../../..
DRM_TOPDIR = $(CURDIR)/..
11,13 → 12,10
 
DRV_INCLUDES = $(DRV_TOPDIR)/include
 
INCLUDES = -I$(DRV_INCLUDES) \
-I$(DRV_INCLUDES)/asm \
-I$(DRV_INCLUDES)/uapi \
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/drm \
-I$(DRV_INCLUDES)/linux
 
CFLAGS= -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -fno-ident -fomit-frame-pointer -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields
CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf
 
LIBPATH:= $(DRV_TOPDIR)/ddk
 
32,6 → 30,7
HFILES:= $(DRV_INCLUDES)/linux/types.h \
$(DRV_INCLUDES)/linux/list.h \
$(DRV_INCLUDES)/linux/pci.h \
$(DRV_INCLUDES)/drm/drm.h \
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
55,7 → 54,6
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_drv.c \
$(DRM_TOPDIR)/drm_atomic.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
125,7 → 123,6
radeon_ring.c \
radeon_sa.c \
radeon_semaphore.c \
radeon_sync.c \
radeon_test.c \
radeon_ttm.c \
radeon_ucode.c \
142,6 → 139,7
rv740_dpm.c \
r520.c \
r600.c \
r600_audio.c \
r600_blit_shaders.c \
r600_cs.c \
r600_dma.c \
/drivers/video/drm/radeon/Makefile.lto
5,17 → 5,15
AS = as
FASM = fasm
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
DDK_TOPDIR = d:\kos\kolibri\drivers\ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
 
INCLUDES = -I$(DRV_INCLUDES) \
-I$(DRV_INCLUDES)/asm \
-I$(DRV_INCLUDES)/uapi \
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)
INCLUDES = -I$(DRV_INCLUDES)/linux/uapi -I$(DRV_INCLUDES)/linux \
-I$(DRV_INCLUDES)/linux/asm -I$(DRV_INCLUDES)/drm \
-I./ -I$(DRV_INCLUDES)
 
CFLAGS_OPT = -Os -march=i686 -fno-ident -fomit-frame-pointer -fno-builtin-printf -mno-ms-bitfields
CFLAGS_OPT+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -flto
36,6 → 34,7
HFILES:= $(DRV_INCLUDES)/linux/types.h \
$(DRV_INCLUDES)/linux/list.h \
$(DRV_INCLUDES)/linux/pci.h \
$(DRV_INCLUDES)/drm/drm.h \
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
59,7 → 58,6
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_drv.c \
$(DRM_TOPDIR)/drm_atomic.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
129,7 → 127,6
radeon_ring.c \
radeon_sa.c \
radeon_semaphore.c \
radeon_sync.c \
radeon_test.c \
radeon_ttm.c \
radeon_ucode.c \
146,6 → 143,7
rv740_dpm.c \
r520.c \
r600.c \
r600_audio.c \
r600_blit_shaders.c \
r600_cs.c \
r600_dma.c \
/drivers/video/drm/radeon/atom.c
1215,7 → 1215,7
return ret;
}
 
int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params)
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
int r;
 
1236,15 → 1236,6
return r;
}
 
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
int r;
mutex_lock(&ctx->scratch_mutex);
r = atom_execute_table_scratch_unlocked(ctx, index, params);
mutex_unlock(&ctx->scratch_mutex);
return r;
}
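
The split above gives a simple locking convention; summarized as a sketch:

/* Paths that already hold scratch_mutex (the DP AUX and hw-assisted
* i2c transfers in this revision) call
* atom_execute_table_scratch_unlocked(ctx, index, params);
* every other caller keeps using atom_execute_table(), which takes
* and drops ctx->scratch_mutex around the same body.
*/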
 
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
 
static void atom_index_iio(struct atom_context *ctx, int base)
/drivers/video/drm/radeon/atombios_crtc.c
2039,7 → 2039,6
atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
// radeon_cursor_reset(crtc);
/* update the hw version for dpm */
radeon_crtc->hw_mode = *adjusted_mode;
 
/drivers/video/drm/radeon/atombios_encoders.c
291,6 → 291,29
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
 
 
static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
return true;
default:
return false;
}
}
 
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
/drivers/video/drm/radeon/atombios_i2c.c
48,7 → 48,6
memset(&args, 0, sizeof(args));
 
mutex_lock(&chan->mutex);
mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
 
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
83,7 → 82,7
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
 
atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
96,7 → 95,6
radeon_atom_copy_swap(buf, base, num, false);
 
done:
mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
mutex_unlock(&chan->mutex);
 
return r;
/drivers/video/drm/radeon/btc_dpm.c
24,7 → 24,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "btcd.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
2100,6 → 2099,7
bool disable_mclk_switching;
u32 mclk, sclk;
u16 vddc, vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
 
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
btc_dpm_vblank_too_short(rdev))
2141,6 → 2141,39
ps->low.vddci = max_limits->vddci;
}
 
/* limit clocks to max supported clocks based on voltage dependency tables */
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
&max_sclk_vddc);
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
&max_mclk_vddci);
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
&max_mclk_vddc);
 
if (max_sclk_vddc) {
if (ps->low.sclk > max_sclk_vddc)
ps->low.sclk = max_sclk_vddc;
if (ps->medium.sclk > max_sclk_vddc)
ps->medium.sclk = max_sclk_vddc;
if (ps->high.sclk > max_sclk_vddc)
ps->high.sclk = max_sclk_vddc;
}
if (max_mclk_vddci) {
if (ps->low.mclk > max_mclk_vddci)
ps->low.mclk = max_mclk_vddci;
if (ps->medium.mclk > max_mclk_vddci)
ps->medium.mclk = max_mclk_vddci;
if (ps->high.mclk > max_mclk_vddci)
ps->high.mclk = max_mclk_vddci;
}
if (max_mclk_vddc) {
if (ps->low.mclk > max_mclk_vddc)
ps->low.mclk = max_mclk_vddc;
if (ps->medium.mclk > max_mclk_vddc)
ps->medium.mclk = max_mclk_vddc;
if (ps->high.mclk > max_mclk_vddc)
ps->high.mclk = max_mclk_vddc;
}
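
The three clamp blocks share one shape; a hypothetical helper that could factor it out (a sketch only, and the same pattern recurs in the ci_dpm.c hunk below):

static void btc_clamp_clock(u32 *clk, u32 max_clk)
{
/* a max of 0 means no limit was found in the dependency table */
if (max_clk && *clk > max_clk)
*clk = max_clk;
}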
 
/* XXX validate the min clocks required for display */
 
if (disable_mclk_switching) {
/drivers/video/drm/radeon/ci_dpm.c
24,7 → 24,6
#include <linux/firmware.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"
#include "cikd.h"
#include "r600_dpm.h"
46,15 → 45,15
static const struct ci_pt_defaults defaults_hawaii_xt =
{
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
{ 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};
 
static const struct ci_pt_defaults defaults_hawaii_pro =
{
1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
{ 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};
 
static const struct ci_pt_defaults defaults_bonaire_xt =
163,6 → 162,8
};
 
extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
u32 *max_clock);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
184,9 → 185,6
u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
 
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
PPSMC_Msg msg, u32 parameter);
 
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
struct ci_power_info *pi = rdev->pm.dpm.priv;
252,9 → 250,6
 
if (pi->caps_power_containment) {
pi->caps_cac = true;
if (rdev->family == CHIP_HAWAII)
pi->enable_bapm_feature = false;
else
pi->enable_bapm_feature = true;
pi->enable_tdc_limit_feature = true;
pi->enable_pkg_pwr_tracking_feature = true;
358,21 → 353,6
return 0;
}
 
static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
 
if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
(rdev->pm.dpm.fan.fan_output_sensitivity == 0))
rdev->pm.dpm.fan.fan_output_sensitivity =
rdev->pm.dpm.fan.default_fan_output_sensitivity;
 
pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
 
return 0;
}
 
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
498,9 → 478,6
ret = ci_populate_dw8(rdev);
if (ret)
return ret;
ret = ci_populate_fuzzy_fan(rdev);
if (ret)
return ret;
ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
if (ret)
return ret;
714,25 → 691,6
return ret;
}
 
static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
bool enable)
{
struct ci_power_info *pi = ci_get_pi(rdev);
PPSMC_Result smc_result = PPSMC_Result_OK;
 
if (pi->thermal_sclk_dpm_enabled) {
if (enable)
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
else
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
}
 
if (smc_result == PPSMC_Result_OK)
return 0;
else
return -EINVAL;
}
 
static int ci_power_control_set_level(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
743,11 → 701,13
int ret = 0;
bool adjust_polarity = false; /* ??? */
 
if (pi->caps_power_containment) {
if (pi->caps_power_containment &&
(pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
adjust_percent = adjust_polarity ?
rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
target_tdp = ((100 + adjust_percent) *
(s32)cac_tdp_table->configurable_tdp) / 100;
target_tdp *= 256;
 
ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
}
788,6 → 748,7
struct radeon_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching;
u32 sclk, mclk;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
 
if (rps->vce_active) {
823,6 → 784,29
}
}
 
/* limit clocks to max supported clocks based on voltage dependency tables */
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
&max_sclk_vddc);
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
&max_mclk_vddci);
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
&max_mclk_vddc);
 
for (i = 0; i < ps->performance_level_count; i++) {
if (max_sclk_vddc) {
if (ps->performance_levels[i].sclk > max_sclk_vddc)
ps->performance_levels[i].sclk = max_sclk_vddc;
}
if (max_mclk_vddci) {
if (ps->performance_levels[i].mclk > max_mclk_vddci)
ps->performance_levels[i].mclk = max_mclk_vddci;
}
if (max_mclk_vddc) {
if (ps->performance_levels[i].mclk > max_mclk_vddc)
ps->performance_levels[i].mclk = max_mclk_vddc;
}
}
 
/* XXX validate the min clocks required for display */
 
if (disable_mclk_switching) {
855,7 → 839,7
}
}
 
static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
int min_temp, int max_temp)
{
int low_temp = 0 * 1000;
891,351 → 875,7
return 0;
}
 
static int ci_thermal_enable_alert(struct radeon_device *rdev,
bool enable)
{
u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
PPSMC_Result result;
 
if (enable) {
thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
WREG32_SMC(CG_THERMAL_INT, thermal_int);
rdev->irq.dpm_thermal = false;
result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
if (result != PPSMC_Result_OK) {
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
return -EINVAL;
}
} else {
thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
WREG32_SMC(CG_THERMAL_INT, thermal_int);
rdev->irq.dpm_thermal = true;
result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
if (result != PPSMC_Result_OK) {
DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
return -EINVAL;
}
}
 
return 0;
}
 
static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
{
struct ci_power_info *pi = ci_get_pi(rdev);
u32 tmp;
 
if (pi->fan_ctrl_is_in_default_mode) {
tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
pi->fan_ctrl_default_mode = tmp;
tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
pi->t_min = tmp;
pi->fan_ctrl_is_in_default_mode = false;
}
 
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
tmp |= TMIN(0);
WREG32_SMC(CG_FDO_CTRL2, tmp);
 
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
tmp |= FDO_PWM_MODE(mode);
WREG32_SMC(CG_FDO_CTRL2, tmp);
}
 
static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
u32 duty100;
u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
u16 fdo_min, slope1, slope2;
u32 reference_clock, tmp;
int ret;
u64 tmp64;
 
if (!pi->fan_table_start) {
rdev->pm.dpm.fan.ucode_fan_control = false;
return 0;
}
 
duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
 
if (duty100 == 0) {
rdev->pm.dpm.fan.ucode_fan_control = false;
return 0;
}
 
tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
do_div(tmp64, 10000);
fdo_min = (u16)tmp64;
 
t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
 
pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
 
slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
 
fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
 
fan_table.Slope1 = cpu_to_be16(slope1);
fan_table.Slope2 = cpu_to_be16(slope2);
 
fan_table.FdoMin = cpu_to_be16(fdo_min);
 
fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
 
fan_table.HystUp = cpu_to_be16(1);
 
fan_table.HystSlope = cpu_to_be16(1);
 
fan_table.TempRespLim = cpu_to_be16(5);
 
reference_clock = radeon_get_xclk(rdev);
 
fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
reference_clock) / 1600);
 
fan_table.FdoMax = cpu_to_be16((u16)duty100);
 
tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
fan_table.TempSrc = (uint8_t)tmp;
 
ret = ci_copy_bytes_to_smc(rdev,
pi->fan_table_start,
(u8 *)(&fan_table),
sizeof(fan_table),
pi->sram_end);
 
if (ret) {
DRM_ERROR("Failed to load fan table to the SMC.");
rdev->pm.dpm.fan.ucode_fan_control = false;
}
 
return 0;
}
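/* Worked example for the fixed-point slope math in
 * ci_thermal_setup_fan_table() above (illustrative values, not taken from
 * any specific board): with duty100 = 255, pwm_min = 1000 (10.00%),
 * pwm_med = 4000 and t_min = 30000, t_med = 60000 (millidegrees),
 *   fdo_min = 1000 * 255 / 10000           = 25 (duty counts)
 *   slope1  = (50 + 16*255*3000/30000)/100 = (50 + 408)/100 = 4
 * A standalone sketch of the same computation:
 */
static u16 fan_slope(u32 duty100, u32 pwm_diff, u32 t_diff)
{
	/* the +50 rounds to nearest after the final /100 */
	return (u16)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);
}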
 
static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
PPSMC_Result ret;
 
if (pi->caps_od_fuzzy_fan_control_support) {
ret = ci_send_msg_to_smc_with_parameter(rdev,
PPSMC_StartFanControl,
FAN_CONTROL_FUZZY);
if (ret != PPSMC_Result_OK)
return -EINVAL;
ret = ci_send_msg_to_smc_with_parameter(rdev,
PPSMC_MSG_SetFanPwmMax,
rdev->pm.dpm.fan.default_max_fan_pwm);
if (ret != PPSMC_Result_OK)
return -EINVAL;
} else {
ret = ci_send_msg_to_smc_with_parameter(rdev,
PPSMC_StartFanControl,
FAN_CONTROL_TABLE);
if (ret != PPSMC_Result_OK)
return -EINVAL;
}
 
return 0;
}
 
#if 0
static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
{
PPSMC_Result ret;
 
ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
if (ret == PPSMC_Result_OK)
return 0;
else
return -EINVAL;
}
 
static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
 
if (rdev->pm.no_fan)
return -ENOENT;
 
duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
 
if (duty100 == 0)
return -EINVAL;
 
tmp64 = (u64)duty * 100;
do_div(tmp64, duty100);
*speed = (u32)tmp64;
 
if (*speed > 100)
*speed = 100;
 
return 0;
}
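/* Sanity check for the duty -> percent conversion in
 * ci_fan_ctrl_get_fan_speed_percent() above: with duty100 = 255 and a
 * measured duty of 128, the readback is 128 * 100 / 255 = 50 percent.
 * A minimal standalone sketch of the same math:
 */
static u32 duty_to_percent(u32 duty, u32 duty100)
{
	u64 tmp64 = (u64)duty * 100;

	do_div(tmp64, duty100);		/* 64-bit divide, as in the driver */
	return min((u32)tmp64, 100u);	/* clamp, mirroring the code above */
}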
 
static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
u32 speed)
{
u32 tmp;
u32 duty, duty100;
u64 tmp64;
 
if (rdev->pm.no_fan)
return -ENOENT;
 
if (speed > 100)
return -EINVAL;
 
if (rdev->pm.dpm.fan.ucode_fan_control)
ci_fan_ctrl_stop_smc_fan_control(rdev);
 
duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
 
if (duty100 == 0)
return -EINVAL;
 
tmp64 = (u64)speed * duty100;
do_div(tmp64, 100);
duty = (u32)tmp64;
 
tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
tmp |= FDO_STATIC_DUTY(duty);
WREG32_SMC(CG_FDO_CTRL0, tmp);
 
ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
 
return 0;
}
 
static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
u32 *speed)
{
u32 tach_period;
u32 xclk = radeon_get_xclk(rdev);
 
if (rdev->pm.no_fan)
return -ENOENT;
 
if (rdev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
 
tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
if (tach_period == 0)
return -ENOENT;
 
*speed = 60 * xclk * 10000 / tach_period;
 
return 0;
}
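/* Worked example for the tach readback above (illustrative numbers,
 * assuming radeon_get_xclk() reports the reference clock in 10 kHz units
 * and TACH_PERIOD counts xclk cycles): with xclk = 2500 (25 MHz) and
 * tach_period = 500000,
 *   speed = 60 * 2500 * 10000 / 500000 = 3000 RPM.
 * ci_fan_ctrl_set_fan_speed_rpm() below inverts this with an extra factor
 * of 8; how that factor relates to the EDGE_PER_REV programming is an
 * assumption here, not spelled out in this driver. */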
 
static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
u32 speed)
{
u32 tach_period, tmp;
u32 xclk = radeon_get_xclk(rdev);
 
if (rdev->pm.no_fan)
return -ENOENT;
 
if (rdev->pm.fan_pulses_per_revolution == 0)
return -ENOENT;
 
if ((speed < rdev->pm.fan_min_rpm) ||
(speed > rdev->pm.fan_max_rpm))
return -EINVAL;
 
if (rdev->pm.dpm.fan.ucode_fan_control)
ci_fan_ctrl_stop_smc_fan_control(rdev);
 
tach_period = 60 * xclk * 10000 / (8 * speed);
tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
tmp |= TARGET_PERIOD(tach_period);
WREG32_SMC(CG_TACH_CTRL, tmp);
 
ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
 
return 0;
}
#endif
 
static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
u32 tmp;
 
if (!pi->fan_ctrl_is_in_default_mode) {
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
WREG32_SMC(CG_FDO_CTRL2, tmp);
 
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
tmp |= TMIN(pi->t_min);
WREG32_SMC(CG_FDO_CTRL2, tmp);
pi->fan_ctrl_is_in_default_mode = true;
}
}
 
static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
{
if (rdev->pm.dpm.fan.ucode_fan_control) {
ci_fan_ctrl_start_smc_fan_control(rdev);
ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
}
}
 
static void ci_thermal_initialize(struct radeon_device *rdev)
{
u32 tmp;
 
if (rdev->pm.fan_pulses_per_revolution) {
tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
WREG32_SMC(CG_TACH_CTRL, tmp);
}
 
tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
tmp |= TACH_PWM_RESP_RATE(0x28);
WREG32_SMC(CG_FDO_CTRL2, tmp);
}
 
static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
{
int ret;
 
ci_thermal_initialize(rdev);
ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = ci_thermal_enable_alert(rdev, true);
if (ret)
return ret;
if (rdev->pm.dpm.fan.ucode_fan_control) {
ret = ci_thermal_setup_fan_table(rdev);
if (ret)
return ret;
ci_thermal_start_smc_fan_control(rdev);
}
 
return 0;
}
 
static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
{
if (!rdev->pm.no_fan)
ci_fan_ctrl_set_default_mode(rdev);
}
 
#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
u16 reg_offset, u32 *value)
{
1638,7 → 1278,7
 
if (!pi->sclk_dpm_key_disabled) {
PPSMC_Result smc_result =
ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
1652,7 → 1292,7
 
if (!pi->mclk_dpm_key_disabled) {
PPSMC_Result smc_result =
ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
2427,33 → 2067,6
return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
}
 
static void ci_register_patching_mc_arb(struct radeon_device *rdev,
const u32 engine_clock,
const u32 memory_clock,
u32 *dram_timing2)
{
bool patch;
u32 tmp, tmp2;
 
tmp = RREG32(MC_SEQ_MISC0);
patch = (tmp & 0x0000f00) == 0x300;
 
if (patch &&
((rdev->pdev->device == 0x67B0) ||
(rdev->pdev->device == 0x67B1))) {
if ((memory_clock > 100000) && (memory_clock <= 125000)) {
tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
*dram_timimg2 &= ~0x00ff0000;
*dram_timimg2 |= tmp2 << 16;
} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
*dram_timimg2 &= ~0x00ff0000;
*dram_timimg2 |= tmp2 << 16;
}
}
}
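/* Worked example for the patch above (illustrative values): on a Hawaii
 * part (0x67B0/0x67B1) with memory_clock = 120000 and engine_clock =
 * 100000 (both presumably in the driver's 10 kHz units), the first branch
 * applies:
 *   tmp2 = ((0x31 * 100000) / 125000 - 1) & 0xff = (39 - 1) & 0xff = 0x26
 * and 0x26 is spliced into bits 23:16 of DRAM_TIMING2. */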
 
 
static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
u32 sclk,
u32 mclk,
2469,8 → 2082,6
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
 
ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
 
arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
arb_regs->McArbBurstTime = (u8)burst_time;
2765,10 → 2376,10
u32 tmp;
u32 reference_clock = rdev->clock.mpll.reference_freq;
 
if (mpll_param.qdr == 1)
freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
if (pi->mem_gddr5)
freq_nom = memory_clock * 4;
else
freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
freq_nom = memory_clock * 2;
 
tmp = (freq_nom / reference_clock);
tmp = tmp * tmp;
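/* Both variants above scale the nominal feedback frequency with the data
 * rate: GDDR5 is quad-pumped (4 transfers per memory-clock cycle), other
 * DRAM types are double-pumped, so e.g. memory_clock = 125000 (10 kHz
 * units, an assumption) gives freq_nom = 500000 on GDDR5. The squared
 * freq_nom/reference_clock ratio presumably feeds the MPLL spread-spectrum
 * setup in the elided remainder of this hunk. */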
2848,6 → 2459,7
&memory_level->MinVddcPhases);
 
memory_level->EnabledForThrottle = 1;
memory_level->EnabledForActivity = 1;
memory_level->UpH = 0;
memory_level->DownH = 100;
memory_level->VoltageDownH = 0;
3180,6 → 2792,7
 
graphic_level->CcPwrDynRm = 0;
graphic_level->CcPwrDynRm1 = 0;
graphic_level->EnabledForActivity = 1;
graphic_level->EnabledForThrottle = 1;
graphic_level->UpH = 0;
graphic_level->DownH = 0;
3228,13 → 2841,10
&pi->smc_state_table.GraphicsLevel[i]);
if (ret)
return ret;
if (i > 1)
pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
if (i == (dpm_table->sclk_table.count - 1))
pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
PPSMC_DISPLAY_WATERMARK_HIGH;
}
pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
 
pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3278,16 → 2888,6
return ret;
}
 
pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
 
if ((dpm_table->mclk_table.count >= 2) &&
((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
pi->smc_state_table.MemoryLevel[1].MinVddc =
pi->smc_state_table.MemoryLevel[0].MinVddc;
pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
}
 
pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
 
pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3344,13 → 2944,8
&pi->dpm_table.pcie_speed_table,
SMU7_MAX_LEVELS_LINK);
 
if (rdev->family == CHIP_BONAIRE)
ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
pi->pcie_gen_powersaving.min,
pi->pcie_lane_powersaving.max);
else
ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
pi->pcie_gen_powersaving.min,
pi->pcie_lane_powersaving.min);
ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
pi->pcie_gen_performance.min,
3418,8 → 3013,7
allowed_sclk_vddc_table->entries[i].clk)) {
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
allowed_sclk_vddc_table->entries[i].clk;
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
(i == 0) ? true : false;
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
pi->dpm_table.sclk_table.count++;
}
}
3431,8 → 3025,7
allowed_mclk_table->entries[i].clk)) {
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
allowed_mclk_table->entries[i].clk;
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
(i == 0) ? true : false;
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
pi->dpm_table.mclk_table.count++;
}
}
3598,7 → 3191,7
table->VddcVddciDelta = 4000;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
table->PCIeBootLinkLevel = 0;
table->PCIeGenInterval = 1;
if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
table->SVI2Enable = 1;
3752,8 → 3345,6
struct ci_power_info *pi = ci_get_pi(rdev);
PPSMC_Result result;
 
ci_apply_disp_minimum_voltage_request(rdev);
 
if (!pi->sclk_dpm_key_disabled) {
if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
result = ci_send_msg_to_smc_with_parameter(rdev,
3773,7 → 3364,7
return -EINVAL;
}
}
#if 0
 
if (!pi->pcie_dpm_key_disabled) {
if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
result = ci_send_msg_to_smc_with_parameter(rdev,
3783,7 → 3374,9
return -EINVAL;
}
}
#endif
 
ci_apply_disp_minimum_voltage_request(rdev);
 
return 0;
}
 
3809,7 → 3402,7
pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
} else {
/* XXX check display min clock requirements */
if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
}
 
4139,23 → 3732,24
enum radeon_dpm_forced_level level)
{
struct ci_power_info *pi = ci_get_pi(rdev);
PPSMC_Result smc_result;
u32 tmp, levels, i;
int ret;
 
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
if ((!pi->pcie_dpm_key_disabled) &&
pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
if ((!pi->sclk_dpm_key_disabled) &&
pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
levels = 0;
tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
ret = ci_dpm_force_state_pcie(rdev, level);
ret = ci_dpm_force_state_sclk(rdev, levels);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
4162,19 → 3756,19
}
}
}
if ((!pi->sclk_dpm_key_disabled) &&
pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
if ((!pi->mclk_dpm_key_disabled) &&
pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
levels = 0;
tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
ret = ci_dpm_force_state_sclk(rdev, levels);
ret = ci_dpm_force_state_mclk(rdev, levels);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
4181,19 → 3775,19
}
}
}
if ((!pi->mclk_dpm_key_disabled) &&
pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
if ((!pi->pcie_dpm_key_disabled) &&
pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
levels = 0;
tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
while (tmp >>= 1)
levels++;
if (levels) {
ret = ci_dpm_force_state_mclk(rdev, levels);
ret = ci_dpm_force_state_pcie(rdev, level);
if (ret)
return ret;
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
if (tmp == levels)
break;
udelay(1);
4247,17 → 3841,21
}
}
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
if (!pi->sclk_dpm_key_disabled) {
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
if (!pi->mclk_dpm_key_disabled) {
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
if (!pi->pcie_dpm_key_disabled) {
PPSMC_Result smc_result;
 
smc_result = ci_send_msg_to_smc(rdev,
PPSMC_MSG_PCIeDPM_UnForceLevel);
smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
if (smc_result != PPSMC_Result_OK)
return -EINVAL;
}
ret = ci_upload_dpm_level_enable_mask(rdev);
if (ret)
return ret;
}
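/* Note on the "while (tmp >>= 1) levels++;" pattern used in the HIGH
 * branch above: it computes the index of the highest set bit of the
 * enable mask (i.e. fls(mask) - 1), which is the number of the top
 * enabled DPM state. Example: mask 0b10110 -> levels = 4, so state 4
 * is forced. */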
 
rdev->pm.dpm.forced_level = level;
4463,96 → 4061,6
return 0;
}
 
static int ci_register_patching_mc_seq(struct radeon_device *rdev,
struct ci_mc_reg_table *table)
{
u8 i, k;
u32 tmp;
bool patch;
 
tmp = RREG32(MC_SEQ_MISC0);
patch = (tmp & 0x0000f00) == 0x300;
 
if (patch &&
((rdev->pdev->device == 0x67B0) ||
(rdev->pdev->device == 0x67B1))) {
for (i = 0; i < table->last; i++) {
if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;
switch(table->mc_reg_address[i].s1 >> 2) {
case MC_SEQ_MISC1:
for (k = 0; k < table->num_entries; k++) {
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
(table->mc_reg_table_entry[k].mclk_max == 137500))
table->mc_reg_table_entry[k].mc_data[i] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
0x00000007;
}
break;
case MC_SEQ_WR_CTL_D0:
for (k = 0; k < table->num_entries; k++) {
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
(table->mc_reg_table_entry[k].mclk_max == 137500))
table->mc_reg_table_entry[k].mc_data[i] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
0x0000D0DD;
}
break;
case MC_SEQ_WR_CTL_D1:
for (k = 0; k < table->num_entries; k++) {
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
(table->mc_reg_table_entry[k].mclk_max == 137500))
table->mc_reg_table_entry[k].mc_data[i] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
0x0000D0DD;
}
break;
case MC_SEQ_WR_CTL_2:
for (k = 0; k < table->num_entries; k++) {
if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
(table->mc_reg_table_entry[k].mclk_max == 137500))
table->mc_reg_table_entry[k].mc_data[i] = 0;
}
break;
case MC_SEQ_CAS_TIMING:
for (k = 0; k < table->num_entries; k++) {
if (table->mc_reg_table_entry[k].mclk_max == 125000)
table->mc_reg_table_entry[k].mc_data[i] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
0x000C0140;
else if (table->mc_reg_table_entry[k].mclk_max == 137500)
table->mc_reg_table_entry[k].mc_data[i] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
0x000C0150;
}
break;
case MC_SEQ_MISC_TIMING:
for (k = 0; k < table->num_entries; k++) {
if (table->mc_reg_table_entry[k].mclk_max == 125000)
table->mc_reg_table_entry[k].mc_data[i] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
0x00000030;
else if (table->mc_reg_table_entry[k].mclk_max == 137500)
table->mc_reg_table_entry[k].mc_data[i] =
(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
0x00000035;
}
break;
default:
break;
}
}
 
WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
}
 
return 0;
}
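/* The mclk_max compare points above are presumably in 10 kHz units,
 * consistent with the rest of the driver: 125000 -> 1.25 GHz and
 * 137500 -> 1.375 GHz memory straps on Hawaii (0x67B0/0x67B1), which is
 * why they match the MC_ARB patch points in ci_register_patching_mc_arb(). */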
 
static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
4596,10 → 4104,6
 
ci_set_s0_mc_reg_index(ci_table);
 
ret = ci_register_patching_mc_seq(rdev, ci_table);
if (ret)
goto init_mc_done;
 
ret = ci_set_mc_special_registers(rdev, ci_table);
if (ret)
goto init_mc_done;
5196,52 → 4700,37
return ret;
}
 
ret = ci_power_control_set_level(rdev);
if (ret) {
DRM_ERROR("ci_power_control_set_level failed\n");
return ret;
}
 
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
 
ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
if (ret) {
DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
return ret;
}
 
ci_thermal_start_thermal_controller(rdev);
 
ci_update_current_ps(rdev, boot_ps);
 
return 0;
}
 
static int ci_set_temperature_range(struct radeon_device *rdev)
int ci_dpm_late_enable(struct radeon_device *rdev)
{
int ret;
 
ret = ci_thermal_enable_alert(rdev, false);
if (ret)
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
#if 0
PPSMC_Result result;
#endif
ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret) {
DRM_ERROR("ci_set_thermal_temperature_range failed\n");
return ret;
ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
if (ret)
return ret;
ret = ci_thermal_enable_alert(rdev, true);
if (ret)
return ret;
}
rdev->irq.dpm_thermal = true;
radeon_irq_set(rdev);
#if 0
result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
 
return ret;
if (result != PPSMC_Result_OK)
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
#endif
}
 
int ci_dpm_late_enable(struct radeon_device *rdev)
{
int ret;
 
ret = ci_set_temperature_range(rdev);
if (ret)
return ret;
 
ci_dpm_powergate_uvd(rdev, true);
 
return 0;
5257,8 → 4746,6
if (!ci_is_smc_running(rdev))
return;
 
ci_thermal_stop_thermal_controller(rdev);
 
if (pi->thermal_protection)
ci_enable_thermal_protection(rdev, false);
ci_enable_power_containment(rdev, false);
5267,13 → 4754,12
ci_enable_spread_spectrum(rdev, false);
ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
ci_stop_dpm(rdev);
ci_enable_ds_master_switch(rdev, false);
ci_enable_ds_master_switch(rdev, true);
ci_enable_ulv(rdev, false);
ci_clear_vc(rdev);
ci_reset_to_default(rdev);
ci_dpm_stop_smc(rdev);
ci_force_switch_to_arb_f0(rdev);
ci_enable_thermal_based_sclk_dpm(rdev, false);
 
ci_update_current_ps(rdev, boot_ps);
}
5343,6 → 4829,11
return 0;
}
 
int ci_dpm_power_control_set_level(struct radeon_device *rdev)
{
return ci_power_control_set_level(rdev);
}
 
void ci_dpm_reset_asic(struct radeon_device *rdev)
{
ci_set_boot_state(rdev);
5602,8 → 5093,6
int ci_dpm_init(struct radeon_device *rdev)
{
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
SMU7_Discrete_DpmTable *dpm_table;
struct radeon_gpio_rec gpio;
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
5673,7 → 5162,6
pi->sclk_dpm_key_disabled = 0;
pi->mclk_dpm_key_disabled = 0;
pi->pcie_dpm_key_disabled = 0;
pi->thermal_sclk_dpm_enabled = 0;
 
/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
if ((rdev->pdev->device == 0x6658) &&
5738,55 → 5226,6
 
pi->uvd_enabled = false;
 
dpm_table = &pi->smc_state_table;
 
gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
if (gpio.valid) {
dpm_table->VRHotGpio = gpio.shift;
rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
} else {
dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
}
 
gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
if (gpio.valid) {
dpm_table->AcDcGpio = gpio.shift;
rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
} else {
dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
}
 
gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
if (gpio.valid) {
u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
 
switch (gpio.shift) {
case 0:
tmp &= ~GNB_SLOW_MODE_MASK;
tmp |= GNB_SLOW_MODE(1);
break;
case 1:
tmp &= ~GNB_SLOW_MODE_MASK;
tmp |= GNB_SLOW_MODE(2);
break;
case 2:
tmp |= GNB_SLOW;
break;
case 3:
tmp |= FORCE_NB_PS1;
break;
case 4:
tmp |= DPM_ENABLED;
break;
default:
DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
break;
}
WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
}
 
pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5848,8 → 5287,6
rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
 
pi->fan_ctrl_is_in_default_mode = true;
 
return 0;
}
 
5856,13 → 5293,9
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
struct ci_power_info *pi = ci_get_pi(rdev);
struct radeon_ps *rps = &pi->current_rps;
u32 sclk = ci_get_average_sclk_freq(rdev);
u32 mclk = ci_get_average_mclk_freq(rdev);
 
seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
seq_printf(m, "power level avg sclk: %u mclk: %u\n",
sclk, mclk);
}
/drivers/video/drm/radeon/ci_dpm.h
33,8 → 33,6
 
#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
 
#define CISLANDS_UNUSED_GPIO_PIN 0x7F
 
struct ci_pl {
u32 mclk;
u32 sclk;
239,7 → 237,6
u32 sclk_dpm_key_disabled;
u32 mclk_dpm_key_disabled;
u32 pcie_dpm_key_disabled;
u32 thermal_sclk_dpm_enabled;
struct ci_pcie_perf_range pcie_gen_performance;
struct ci_pcie_perf_range pcie_lane_performance;
struct ci_pcie_perf_range pcie_gen_powersaving;
267,7 → 264,6
bool caps_automatic_dc_transition;
bool caps_sclk_throttle_low_notification;
bool caps_dynamic_ac_timing;
bool caps_od_fuzzy_fan_control_support;
/* flags */
bool thermal_protection;
bool pcie_performance_request;
289,10 → 285,6
struct ci_ps current_ps;
struct radeon_ps requested_rps;
struct ci_ps requested_ps;
/* fan control */
bool fan_ctrl_is_in_default_mode;
u32 t_min;
u32 fan_ctrl_default_mode;
};
 
#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
/drivers/video/drm/radeon/ci_smc.c
129,7 → 129,7
 
int ci_program_jump_on_start(struct radeon_device *rdev)
{
static const u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
 
return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
}
/drivers/video/drm/radeon/cik_reg.h
147,140 → 147,4
 
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
 
#define CP_HQD_IQ_RPTR 0xC970u
#define AQL_ENABLE (1U << 0)
 
#define IDLE (1 << 2)
 
struct cik_mqd {
uint32_t header;
uint32_t compute_dispatch_initiator;
uint32_t compute_dim_x;
uint32_t compute_dim_y;
uint32_t compute_dim_z;
uint32_t compute_start_x;
uint32_t compute_start_y;
uint32_t compute_start_z;
uint32_t compute_num_thread_x;
uint32_t compute_num_thread_y;
uint32_t compute_num_thread_z;
uint32_t compute_pipelinestat_enable;
uint32_t compute_perfcount_enable;
uint32_t compute_pgm_lo;
uint32_t compute_pgm_hi;
uint32_t compute_tba_lo;
uint32_t compute_tba_hi;
uint32_t compute_tma_lo;
uint32_t compute_tma_hi;
uint32_t compute_pgm_rsrc1;
uint32_t compute_pgm_rsrc2;
uint32_t compute_vmid;
uint32_t compute_resource_limits;
uint32_t compute_static_thread_mgmt_se0;
uint32_t compute_static_thread_mgmt_se1;
uint32_t compute_tmpring_size;
uint32_t compute_static_thread_mgmt_se2;
uint32_t compute_static_thread_mgmt_se3;
uint32_t compute_restart_x;
uint32_t compute_restart_y;
uint32_t compute_restart_z;
uint32_t compute_thread_trace_enable;
uint32_t compute_misc_reserved;
uint32_t compute_user_data_0;
uint32_t compute_user_data_1;
uint32_t compute_user_data_2;
uint32_t compute_user_data_3;
uint32_t compute_user_data_4;
uint32_t compute_user_data_5;
uint32_t compute_user_data_6;
uint32_t compute_user_data_7;
uint32_t compute_user_data_8;
uint32_t compute_user_data_9;
uint32_t compute_user_data_10;
uint32_t compute_user_data_11;
uint32_t compute_user_data_12;
uint32_t compute_user_data_13;
uint32_t compute_user_data_14;
uint32_t compute_user_data_15;
uint32_t cp_compute_csinvoc_count_lo;
uint32_t cp_compute_csinvoc_count_hi;
uint32_t cp_mqd_base_addr_lo;
uint32_t cp_mqd_base_addr_hi;
uint32_t cp_hqd_active;
uint32_t cp_hqd_vmid;
uint32_t cp_hqd_persistent_state;
uint32_t cp_hqd_pipe_priority;
uint32_t cp_hqd_queue_priority;
uint32_t cp_hqd_quantum;
uint32_t cp_hqd_pq_base_lo;
uint32_t cp_hqd_pq_base_hi;
uint32_t cp_hqd_pq_rptr;
uint32_t cp_hqd_pq_rptr_report_addr_lo;
uint32_t cp_hqd_pq_rptr_report_addr_hi;
uint32_t cp_hqd_pq_wptr_poll_addr_lo;
uint32_t cp_hqd_pq_wptr_poll_addr_hi;
uint32_t cp_hqd_pq_doorbell_control;
uint32_t cp_hqd_pq_wptr;
uint32_t cp_hqd_pq_control;
uint32_t cp_hqd_ib_base_addr_lo;
uint32_t cp_hqd_ib_base_addr_hi;
uint32_t cp_hqd_ib_rptr;
uint32_t cp_hqd_ib_control;
uint32_t cp_hqd_iq_timer;
uint32_t cp_hqd_iq_rptr;
uint32_t cp_hqd_dequeue_request;
uint32_t cp_hqd_dma_offload;
uint32_t cp_hqd_sema_cmd;
uint32_t cp_hqd_msg_type;
uint32_t cp_hqd_atomic0_preop_lo;
uint32_t cp_hqd_atomic0_preop_hi;
uint32_t cp_hqd_atomic1_preop_lo;
uint32_t cp_hqd_atomic1_preop_hi;
uint32_t cp_hqd_hq_status0;
uint32_t cp_hqd_hq_control0;
uint32_t cp_mqd_control;
uint32_t cp_mqd_query_time_lo;
uint32_t cp_mqd_query_time_hi;
uint32_t cp_mqd_connect_start_time_lo;
uint32_t cp_mqd_connect_start_time_hi;
uint32_t cp_mqd_connect_end_time_lo;
uint32_t cp_mqd_connect_end_time_hi;
uint32_t cp_mqd_connect_end_wf_count;
uint32_t cp_mqd_connect_end_pq_rptr;
uint32_t cp_mqd_connect_end_pq_wptr;
uint32_t cp_mqd_connect_end_ib_rptr;
uint32_t reserved_96;
uint32_t reserved_97;
uint32_t reserved_98;
uint32_t reserved_99;
uint32_t iqtimer_pkt_header;
uint32_t iqtimer_pkt_dw0;
uint32_t iqtimer_pkt_dw1;
uint32_t iqtimer_pkt_dw2;
uint32_t iqtimer_pkt_dw3;
uint32_t iqtimer_pkt_dw4;
uint32_t iqtimer_pkt_dw5;
uint32_t iqtimer_pkt_dw6;
uint32_t reserved_108;
uint32_t reserved_109;
uint32_t reserved_110;
uint32_t reserved_111;
uint32_t queue_doorbell_id0;
uint32_t queue_doorbell_id1;
uint32_t queue_doorbell_id2;
uint32_t queue_doorbell_id3;
uint32_t queue_doorbell_id4;
uint32_t queue_doorbell_id5;
uint32_t queue_doorbell_id6;
uint32_t queue_doorbell_id7;
uint32_t queue_doorbell_id8;
uint32_t queue_doorbell_id9;
uint32_t queue_doorbell_id10;
uint32_t queue_doorbell_id11;
uint32_t queue_doorbell_id12;
uint32_t queue_doorbell_id13;
uint32_t queue_doorbell_id14;
uint32_t queue_doorbell_id15;
};
 
#endif
/drivers/video/drm/radeon/cikd.h
30,8 → 30,6
#define CIK_RB_BITMAP_WIDTH_PER_SH 2
#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
 
#define RADEON_NUM_OF_VMIDS 8
 
/* DIDT IND registers */
#define DIDT_SQ_CTRL0 0x0
# define DIDT_CTRL_EN (1 << 0)
186,10 → 184,7
#define DIG_THERM_DPM(x) ((x) << 14)
#define DIG_THERM_DPM_MASK 0x003FC000
#define DIG_THERM_DPM_SHIFT 14
#define CG_THERMAL_STATUS 0xC0300008
#define FDO_PWM_DUTY(x) ((x) << 9)
#define FDO_PWM_DUTY_MASK (0xff << 9)
#define FDO_PWM_DUTY_SHIFT 9
 
#define CG_THERMAL_INT 0xC030000C
#define CI_DIG_THERM_INTH(x) ((x) << 8)
#define CI_DIG_THERM_INTH_MASK 0x0000FF00
199,10 → 194,7
#define CI_DIG_THERM_INTL_SHIFT 16
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
#define CG_MULT_THERMAL_CTRL 0xC0300010
#define TEMP_SEL(x) ((x) << 20)
#define TEMP_SEL_MASK (0xff << 20)
#define TEMP_SEL_SHIFT 20
 
#define CG_MULT_THERMAL_STATUS 0xC0300014
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
211,36 → 203,6
#define CTF_TEMP_MASK 0x0003fe00
#define CTF_TEMP_SHIFT 9
 
#define CG_FDO_CTRL0 0xC0300064
#define FDO_STATIC_DUTY(x) ((x) << 0)
#define FDO_STATIC_DUTY_MASK 0x000000FF
#define FDO_STATIC_DUTY_SHIFT 0
#define CG_FDO_CTRL1 0xC0300068
#define FMAX_DUTY100(x) ((x) << 0)
#define FMAX_DUTY100_MASK 0x000000FF
#define FMAX_DUTY100_SHIFT 0
#define CG_FDO_CTRL2 0xC030006C
#define TMIN(x) ((x) << 0)
#define TMIN_MASK 0x000000FF
#define TMIN_SHIFT 0
#define FDO_PWM_MODE(x) ((x) << 11)
#define FDO_PWM_MODE_MASK (7 << 11)
#define FDO_PWM_MODE_SHIFT 11
#define TACH_PWM_RESP_RATE(x) ((x) << 25)
#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
#define TACH_PWM_RESP_RATE_SHIFT 25
#define CG_TACH_CTRL 0xC0300070
# define EDGE_PER_REV(x) ((x) << 0)
# define EDGE_PER_REV_MASK (0x7 << 0)
# define EDGE_PER_REV_SHIFT 0
# define TARGET_PERIOD(x) ((x) << 3)
# define TARGET_PERIOD_MASK 0xfffffff8
# define TARGET_PERIOD_SHIFT 3
#define CG_TACH_STATUS 0xC0300074
# define TACH_PERIOD(x) ((x) << 0)
# define TACH_PERIOD_MASK 0xffffffff
# define TACH_PERIOD_SHIFT 0
 
#define CG_ECLK_CNTL 0xC05000AC
# define ECLK_DIVIDER_MASK 0x7f
# define ECLK_DIR_CNTL_EN (1 << 8)
1175,9 → 1137,6
#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
#define DEFAULT_MTYPE(x) ((x) << 4)
#define APE1_MTYPE(x) ((x) << 7)
/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
#define MTYPE_CACHED 0
#define MTYPE_NONCACHED 3
 
#define SX_DEBUG_1 0x9060
 
1488,16 → 1447,6
#define CP_HQD_ACTIVE 0xC91C
#define CP_HQD_VMID 0xC920
 
#define CP_HQD_PERSISTENT_STATE 0xC924u
#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8)
 
#define CP_HQD_PIPE_PRIORITY 0xC928u
#define CP_HQD_QUEUE_PRIORITY 0xC92Cu
#define CP_HQD_QUANTUM 0xC930u
#define QUANTUM_EN 1U
#define QUANTUM_SCALE_1MS (1U << 4)
#define QUANTUM_DURATION(x) ((x) << 8)
 
#define CP_HQD_PQ_BASE 0xC934
#define CP_HQD_PQ_BASE_HI 0xC938
#define CP_HQD_PQ_RPTR 0xC93C
1525,32 → 1474,12
#define PRIV_STATE (1 << 30)
#define KMD_QUEUE (1 << 31)
 
#define CP_HQD_IB_BASE_ADDR 0xC95Cu
#define CP_HQD_IB_BASE_ADDR_HI 0xC960u
#define CP_HQD_IB_RPTR 0xC964u
#define CP_HQD_IB_CONTROL 0xC968u
#define IB_ATC_EN (1U << 23)
#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
 
#define CP_HQD_DEQUEUE_REQUEST 0xC974
#define DEQUEUE_REQUEST_DRAIN 1
#define DEQUEUE_REQUEST_RESET 2
 
#define CP_MQD_CONTROL 0xC99C
#define MQD_VMID(x) ((x) << 0)
#define MQD_VMID_MASK (0xf << 0)
 
#define CP_HQD_SEMA_CMD 0xC97Cu
#define CP_HQD_MSG_TYPE 0xC980u
#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u
#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u
#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu
#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u
#define CP_HQD_HQ_SCHEDULER0 0xC994u
#define CP_HQD_HQ_SCHEDULER1 0xC998u
 
#define SH_STATIC_MEM_CONFIG 0x9604u
 
#define DB_RENDER_CONTROL 0x28000
 
#define PA_SC_RASTER_CONFIG 0x28350
2140,20 → 2069,4
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
 
#define ATC_VMID0_PASID_MAPPING 0x339Cu
#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u
#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
 
#define ATC_VM_APERTURE0_CNTL 0x3310u
#define ATS_ACCESS_MODE_NEVER 0
#define ATS_ACCESS_MODE_ALWAYS 1
 
#define ATC_VM_APERTURE0_CNTL2 0x3318u
#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u
#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u
#define ATC_VM_APERTURE1_CNTL 0x3314u
#define ATC_VM_APERTURE1_CNTL2 0x331Cu
#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
 
#endif
/drivers/video/drm/radeon/cmdline.c
1,5 → 1,7
 
#include <drm/drmP.h>
#include <drm.h>
#include <drm_mm.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_object.h"
/drivers/video/drm/radeon/cypress_dpm.c
24,7 → 24,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
/drivers/video/drm/radeon/evergreen_cs.c
35,7 → 35,7
#define MIN(a,b) (((a)<(b))?(a):(b))
 
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_bo_list **cs_reloc);
struct radeon_cs_reloc **cs_reloc);
struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
1094,7 → 1094,7
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
u32 last_reg;
u32 m, i, tmp, *ib;
int r;
1792,7 → 1792,7
static int evergreen_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
struct evergreen_cs_track *track;
volatile u32 *ib;
unsigned idx;
2661,7 → 2661,7
p->track = NULL;
return r;
}
} while (p->idx < p->chunk_ib->length_dw);
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
2684,8 → 2684,8
**/
int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
u32 header, cmd, count, sub_cmd;
volatile u32 *ib = p->ib.ptr;
u32 idx;
3100,7 → 3100,7
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (p->idx < p->chunk_ib->length_dw);
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
/drivers/video/drm/radeon/evergreen_dma.c
104,14 → 104,12
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
int evergreen_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv)
struct radeon_fence **fence)
{
struct radeon_fence *fence;
struct radeon_sync sync;
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
118,7 → 116,11
int i, num_loops;
int r = 0;
 
radeon_sync_create(&sync);
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
125,12 → 127,12
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_sync_resv(rdev, &sync, resv, false);
radeon_sync_rings(rdev, &sync, ring->idx);
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
146,17 → 148,17
dst_offset += cur_size_in_dw * 4;
}
 
r = radeon_fence_emit(rdev, &fence, ring->idx);
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_sync_free(rdev, &sync, fence);
radeon_semaphore_free(rdev, &sem, *fence);
 
return fence;
return r;
}
 
/**
/drivers/video/drm/radeon/ni.c
1366,7 → 1366,6
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
PACKET3_SH_ACTION_ENA;
 
1389,7 → 1388,8
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
radeon_ring_write(ring, ib->length_dw |
(ib->vm ? (ib->vm->id << 24) : 0));
 
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1396,7 → 1396,7
radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
}
 
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
2395,12 → 2395,16
* Update the page table base and flush the VM TLB
* using the CP (cayman-si).
*/
void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
radeon_ring_write(ring, pd_addr >> 12);
struct radeon_ring *ring = &rdev->ring[ridx];
 
if (vm == NULL)
return;
 
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* flush hdp cache */
radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
radeon_ring_write(ring, 0x1);
2407,7 → 2411,7
 
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
radeon_ring_write(ring, 1 << vm_id);
radeon_ring_write(ring, 1 << vm->id);
 
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
/drivers/video/drm/radeon/ni_dpm.c
23,7 → 23,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "nid.h"
#include "r600_dpm.h"
#include "ni_dpm.h"
790,6 → 789,7
bool disable_mclk_switching;
u32 mclk;
u16 vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
int i;
 
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
816,6 → 816,29
}
}
 
/* limit clocks to max supported clocks based on voltage dependency tables */
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
&max_sclk_vddc);
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
&max_mclk_vddci);
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
&max_mclk_vddc);
 
for (i = 0; i < ps->performance_level_count; i++) {
if (max_sclk_vddc) {
if (ps->performance_levels[i].sclk > max_sclk_vddc)
ps->performance_levels[i].sclk = max_sclk_vddc;
}
if (max_mclk_vddci) {
if (ps->performance_levels[i].mclk > max_mclk_vddci)
ps->performance_levels[i].mclk = max_mclk_vddci;
}
if (max_mclk_vddc) {
if (ps->performance_levels[i].mclk > max_mclk_vddc)
ps->performance_levels[i].mclk = max_mclk_vddc;
}
}
 
/* XXX validate the min clocks required for display */
 
/* adjust low state */
/drivers/video/drm/radeon/pci.c
1,13 → 1,21
 
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <errno-base.h>
#include <pci.h>
#include <syscall.h>
 
extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn);
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
 
extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);
 
static LIST_HEAD(devices);
 
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
31,9 → 39,9
}
 
 
static u32 pci_size(u32 base, u32 maxbase, u32 mask)
static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask)
{
u32 size = mask & maxbase; /* Find the significant bits */
u32_t size = mask & maxbase; /* Find the significant bits */
 
if (!size)
return 0;
50,9 → 58,9
return size;
}
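/* Worked BAR-sizing example for pci_size() above (the standard
 * "size = (size & ~(size - 1)) - 1" step sits in the elided middle of the
 * hunk; that it is the usual PCI probe logic is an assumption): a 256 MB
 * memory BAR reads back 0xF0000000 after the all-ones probe write, so with
 * PCI_BASE_ADDRESS_MEM_MASK:
 *   size               = 0xF0000000   significant bits
 *   size & ~(size - 1) = 0x10000000   lowest set bit = 256 MB decode
 *   result             = 0x0FFFFFFF   resource end offset */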
 
static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask)
{
u64 size = mask & maxbase; /* Find the significant bits */
u64_t size = mask & maxbase; /* Find the significant bits */
 
if (!size)
return 0;
69,7 → 77,7
return size;
}
 
static inline int is_64bit_memory(u32 mask)
static inline int is_64bit_memory(u32_t mask)
{
if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
(PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
79,15 → 87,15
 
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
u32 pos, reg, next;
u32 l, sz;
u32_t pos, reg, next;
u32_t l, sz;
struct resource *res;
 
for(pos=0; pos < howmany; pos = next)
{
u64 l64;
u64 sz64;
u32 raw_sz;
u64_t l64;
u64_t sz64;
u32_t raw_sz;
 
next = pos + 1;
 
109,7 → 117,7
if ((l & PCI_BASE_ADDRESS_SPACE) ==
PCI_BASE_ADDRESS_SPACE_MEMORY)
{
sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
/*
* For 64bit prefetchable memory sz could be 0, if the
* real size is bigger than 4G, so we need to check
131,14 → 139,14
res->flags |= pci_calc_resource_flags(l);
if (is_64bit_memory(l))
{
u32 szhi, lhi;
u32_t szhi, lhi;
 
lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
sz64 = ((u64)szhi << 32) | raw_sz;
l64 = ((u64)lhi << 32) | l;
sz64 = ((u64_t)szhi << 32) | raw_sz;
l64 = ((u64_t)lhi << 32) | l;
sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
next++;
 
162,7 → 170,7
{
/* 64-bit wide address, treat as disabled */
PciWrite32(dev->busnr, dev->devfn, reg,
l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
res->start = 0;
res->end = sz;
186,7 → 194,7
 
if (sz && sz != 0xffffffff)
{
sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);
 
if (sz)
{
202,7 → 210,7
 
static void pci_read_irq(struct pci_dev *dev)
{
u8 irq;
u8_t irq;
 
irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN);
dev->pin = irq;
214,7 → 222,7
 
int pci_setup_device(struct pci_dev *dev)
{
u32 class;
u32_t class;
 
class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
dev->revision = class & 0xff;
246,7 → 254,7
*/
if (class == PCI_CLASS_STORAGE_IDE)
{
u8 progif;
u8_t progif;
 
progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG);
if ((progif & 1) == 0)
311,12 → 319,12
return 0;
};
 
static pci_dev_t* pci_scan_device(u32 busnr, int devfn)
static pci_dev_t* pci_scan_device(u32_t busnr, int devfn)
{
pci_dev_t *dev;
 
u32 id;
u8 hdr;
u32_t id;
u8_t hdr;
 
int timeout = 10;
 
372,7 → 380,7
 
 
 
int pci_scan_slot(u32 bus, int devfn)
int pci_scan_slot(u32_t bus, int devfn)
{
int func, nr = 0;
 
480,8 → 488,8
int enum_pci_devices()
{
pci_dev_t *dev;
u32 last_bus;
u32 bus = 0 , devfn = 0;
u32_t last_bus;
u32_t bus = 0 , devfn = 0;
 
 
last_bus = PciApi(1);
664,6 → 672,11
}
 
 
struct pci_bus_region {
resource_size_t start;
resource_size_t end;
};
 
static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
762,11 → 775,21
loff_t start;
void __iomem *rom;
 
// ENTER();
 
// dbgprintf("resource start %x end %x flags %x\n",
// res->start, res->end, res->flags);
/*
* IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
* memory map if the VGA enable bit of the Bridge Control register is
* set for embedded VGA.
*/
 
start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
 
#if 0
 
if (res->flags & IORESOURCE_ROM_SHADOW) {
/* primary video rom always starts here */
start = (loff_t)0xC0000;
778,11 → 801,21
return (void __iomem *)(unsigned long)
pci_resource_start(pdev, PCI_ROM_RESOURCE);
} else {
start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
/* assign the ROM an address if it doesn't have one */
// if (res->parent == NULL &&
// pci_assign_resource(pdev,PCI_ROM_RESOURCE))
return NULL;
// start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
// *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
// if (*size == 0)
// return NULL;
 
/* Enable ROM space decodes */
// if (pci_enable_rom(pdev))
// return NULL;
}
}
#endif
 
rom = ioremap(start, *size);
if (!rom) {
800,6 → 833,7
* True size is important if the ROM is going to be copied.
*/
*size = pci_get_rom_size(pdev, rom, *size);
// LEAVE();
return rom;
}
 
827,8 → 861,6
else
cmd = old_cmd & ~PCI_COMMAND_MASTER;
if (cmd != old_cmd) {
dbgprintf("%s bus mastering\n",
enable ? "enabling" : "disabling");
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
dev->is_busmaster = enable;
/drivers/video/drm/radeon/ppsmc.h
56,14 → 56,6
#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
 
#define FDO_MODE_HARDWARE 0
#define FDO_MODE_PIECE_WISE_LINEAR 1
 
enum FAN_CONTROL {
FAN_CONTROL_FUZZY,
FAN_CONTROL_TABLE
};
 
#define PPSMC_Result_OK ((uint8_t)0x01)
#define PPSMC_Result_Failed ((uint8_t)0xFF)
 
87,8 → 79,6
#define PPSMC_MSG_DisableCac ((uint8_t)0x54)
#define PPSMC_TDPClampingActive ((uint8_t)0x59)
#define PPSMC_TDPClampingInactive ((uint8_t)0x5A)
#define PPSMC_StartFanControl ((uint8_t)0x5B)
#define PPSMC_StopFanControl ((uint8_t)0x5C)
#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
116,7 → 106,6
#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
160,11 → 149,7
#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
 
#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
 
#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
 
172,11 → 157,10
#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a)
#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
/drivers/video/drm/radeon/pptable.h
96,14 → 96,6
USHORT usTMax; // The max temperature
} ATOM_PPLIB_FANTABLE2;
 
typedef struct _ATOM_PPLIB_FANTABLE3
{
ATOM_PPLIB_FANTABLE2 basicTable2;
UCHAR ucFanControlMode;
USHORT usFanPWMMax;
USHORT usFanOutputSensitivity;
} ATOM_PPLIB_FANTABLE3;
 
typedef struct _ATOM_PPLIB_EXTENDEDHEADER
{
USHORT usSize;
/drivers/video/drm/radeon/r200.c
80,14 → 80,13
return vtx_size;
}
 
struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv)
struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
struct radeon_fence *fence;
uint32_t size;
uint32_t cur_size;
int i, num_loops;
99,7 → 98,7
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return ERR_PTR(r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
119,13 → 118,11
}
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return ERR_PTR(r);
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring, false);
return fence;
return r;
}
 
 
146,7 → 143,7
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
/drivers/video/drm/radeon/r300.c
598,7 → 598,7
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp, tile_flags = 0;
1142,7 → 1142,7
static int r300_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
unsigned idx;
1283,7 → 1283,7
if (r) {
return r;
}
} while (p->idx < p->chunk_ib->length_dw);
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
return 0;
}
 
/drivers/video/drm/radeon/r600_cs.c
969,7 → 969,7
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
u32 m, i, tmp, *ib;
int r;
 
1626,7 → 1626,7
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
struct r600_cs_track *track;
volatile u32 *ib;
unsigned idx;
2316,7 → 2316,7
p->track = NULL;
return r;
}
} while (p->idx < p->chunk_ib->length_dw);
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
2351,10 → 2351,10
 
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
if (p->chunk_relocs == NULL) {
if (p->chunk_relocs_idx == -1) {
return 0;
}
p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
2398,7 → 2398,7
/* Copy the packet into the IB, the parser will read from the
* input memory (cached) and write to the IB (which can be
* uncached). */
ib_chunk = parser.chunk_ib;
ib_chunk = &parser.chunks[parser.chunk_ib_idx];
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
2435,17 → 2435,17
* GPU offset using the provided start.
**/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_bo_list **cs_reloc)
struct radeon_cs_reloc **cs_reloc)
{
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
 
*cs_reloc = NULL;
if (p->chunk_relocs == NULL) {
if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
relocs_chunk = p->chunk_relocs;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2452,7 → 2452,7
idx, p->nrelocs);
return -EINVAL;
}
*cs_reloc = &p->relocs[idx];
*cs_reloc = p->relocs_ptr[idx];
p->dma_reloc_idx++;
return 0;
}
2472,8 → 2472,8
**/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
struct radeon_bo_list *src_reloc, *dst_reloc;
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
struct radeon_cs_reloc *src_reloc, *dst_reloc;
u32 header, cmd, count, tiled;
volatile u32 *ib = p->ib.ptr;
u32 idx, idx_value;
2619,7 → 2619,7
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (p->idx < p->chunk_ib->length_dw);
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
/drivers/video/drm/radeon/r600_dpm.c
24,7 → 24,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"
811,7 → 810,6
union fan_info {
struct _ATOM_PPLIB_FANTABLE fan;
struct _ATOM_PPLIB_FANTABLE2 fan2;
struct _ATOM_PPLIB_FANTABLE3 fan3;
};
 
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
901,14 → 899,6
else
rdev->pm.dpm.fan.t_max = 10900;
rdev->pm.dpm.fan.cycle_delay = 100000;
if (fan_info->fan.ucFanTableFormat >= 3) {
rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
rdev->pm.dpm.fan.default_max_fan_pwm =
le16_to_cpu(fan_info->fan3.usFanPWMMax);
rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
rdev->pm.dpm.fan.fan_output_sensitivity =
le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
}
rdev->pm.dpm.fan.ucode_fan_control = true;
}
}
1265,7 → 1255,7
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
ppt->usMaximumPowerDeliveryLimit;
pt = &ppt->power_tune_table;
} else {
ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
/drivers/video/drm/radeon/r600_dpm.h
96,9 → 96,6
#define R600_TEMP_RANGE_MIN (90 * 1000)
#define R600_TEMP_RANGE_MAX (120 * 1000)
 
#define FDO_PWM_MODE_STATIC 1
#define FDO_PWM_MODE_STATIC_RPM 5
 
enum r600_power_level {
R600_POWER_LEVEL_LOW = 0,
R600_POWER_LEVEL_MEDIUM = 1,
/drivers/video/drm/radeon/r600_hdmi.c
71,169 → 71,6
 
 
/*
* check if the chipset is supported
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
}
 
static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
{
struct r600_audio_pin status;
uint32_t value;
 
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
 
/* number of channels */
status.channels = (value & 0x7) + 1;
 
/* bits per sample */
switch ((value & 0xF0) >> 4) {
case 0x0:
status.bits_per_sample = 8;
break;
case 0x1:
status.bits_per_sample = 16;
break;
case 0x2:
status.bits_per_sample = 20;
break;
case 0x3:
status.bits_per_sample = 24;
break;
case 0x4:
status.bits_per_sample = 32;
break;
default:
dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
(int)value);
status.bits_per_sample = 16;
}
 
/* current sampling rate in Hz */
if (value & 0x4000)
status.rate = 44100;
else
status.rate = 48000;
status.rate *= ((value >> 11) & 0x7) + 1;
status.rate /= ((value >> 8) & 0x7) + 1;
 
value = RREG32(R600_AUDIO_STATUS_BITS);
 
/* iec 60958 status bits */
status.status_bits = value & 0xff;
 
/* iec 60958 category code */
status.category_code = (value >> 8) & 0xff;
 
return status;
}
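/* Worked decode for r600_audio_status() above, for an illustrative
 * register value of 0x0811:
 *   channels        = (0x0811 & 0x7) + 1            = 2
 *   bits_per_sample : (0x0811 & 0xF0) >> 4 = 0x1 -> 16 bits
 *   rate            = 48000 * (((0x0811 >> 11) & 7) + 1)
 *                           / (((0x0811 >> 8) & 7) + 1) = 96000 Hz
 * (bit 14 set would select the 44100 Hz base instead). */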
 
/*
* update all hdmi interfaces with current audio parameters
*/
void r600_audio_update_hdmi(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
audio_work);
struct drm_device *dev = rdev->ddev;
struct r600_audio_pin audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
bool changed = false;
 
if (rdev->audio.pin[0].channels != audio_status.channels ||
rdev->audio.pin[0].rate != audio_status.rate ||
rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
rdev->audio.pin[0].status_bits != audio_status.status_bits ||
rdev->audio.pin[0].category_code != audio_status.category_code) {
rdev->audio.pin[0] = audio_status;
changed = true;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (!radeon_encoder_is_digital(encoder))
continue;
if (changed || r600_hdmi_buffer_status_changed(encoder))
r600_hdmi_update_audio_settings(encoder);
}
}
 
/* enable the audio stream */
void r600_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
u8 enable_mask)
{
u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
 
if (!pin)
return;
 
if (enable_mask) {
tmp |= AUDIO_ENABLED;
if (enable_mask & 1)
tmp |= PIN0_AUDIO_ENABLED;
if (enable_mask & 2)
tmp |= PIN1_AUDIO_ENABLED;
if (enable_mask & 4)
tmp |= PIN2_AUDIO_ENABLED;
if (enable_mask & 8)
tmp |= PIN3_AUDIO_ENABLED;
} else {
tmp &= ~(AUDIO_ENABLED |
PIN0_AUDIO_ENABLED |
PIN1_AUDIO_ENABLED |
PIN2_AUDIO_ENABLED |
PIN3_AUDIO_ENABLED);
}
 
WREG32(AZ_HOT_PLUG_CONTROL, tmp);
}
 
/*
* initialize the audio vars
*/
int r600_audio_init(struct radeon_device *rdev)
{
if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return 0;
 
rdev->audio.enabled = true;
 
rdev->audio.num_pins = 1;
rdev->audio.pin[0].channels = -1;
rdev->audio.pin[0].rate = -1;
rdev->audio.pin[0].bits_per_sample = -1;
rdev->audio.pin[0].status_bits = 0;
rdev->audio.pin[0].category_code = 0;
rdev->audio.pin[0].id = 0;
/* disable audio; it will be set up later */
r600_audio_enable(rdev, &rdev->audio.pin[0], 0);
 
return 0;
}
 
/*
* release the audio timer
* TODO: How to do this correctly on SMP systems?
*/
void r600_audio_fini(struct radeon_device *rdev)
{
if (!rdev->audio.enabled)
return;
 
r600_audio_enable(rdev, &rdev->audio.pin[0], 0);
 
rdev->audio.enabled = false;
}
 
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
{
/* only one pin on 6xx-NI */
return &rdev->audio.pin[0];
}
 
/*
* calculate CTS and N values if they are not found in the table
*/
static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
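
The fallback follows the HDMI audio clock regeneration relation 128 * freq = f_TMDS * N / CTS. A minimal sketch of that math, not the driver's exact implementation (clock in kHz, freq in Hz):

static void example_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
{
	/* exact for 32/48/96 kHz; real tables special-case 44.1 kHz multiples */
	*N = 128 * freq / 1000;
	/* CTS = f_TMDS * N / (128 * freq), with f_TMDS = clock * 1000 */
	*CTS = (uint64_t)clock * 1000 * *N / (128 * freq);
}
/* a 74.25 MHz TMDS clock at 48 kHz gives N = 6144, CTS = 74250 */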
519,7 → 356,7
 
/* disable audio prior to setting up hw */
dig->afmt->pin = r600_audio_get_pin(rdev);
r600_audio_enable(rdev, dig->afmt->pin, 0xf);
r600_audio_enable(rdev, dig->afmt->pin, false);
 
r600_audio_set_dto(encoder, mode->clock);
 
605,7 → 442,7
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
 
/* enable audio after to setting up hw */
r600_audio_enable(rdev, dig->afmt->pin, 0xf);
r600_audio_enable(rdev, dig->afmt->pin, true);
}
 
/**
690,11 → 527,6
if (!enable && !dig->afmt->enabled)
return;
 
if (!enable && dig->afmt->pin) {
r600_audio_enable(rdev, dig->afmt->pin, 0);
dig->afmt->pin = NULL;
}
 
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
/drivers/video/drm/radeon/radeon_benchmark.c
45,29 → 45,33
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
fence = radeon_copy_dma(rdev, saddr, daddr,
r = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
NULL);
&fence);
break;
case RADEON_BENCHMARK_COPY_BLIT:
fence = radeon_copy_blit(rdev, saddr, daddr,
r = radeon_copy_blit(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
NULL);
&fence);
break;
default:
DRM_ERROR("Unknown copy method\n");
return -EINVAL;
r = -EINVAL;
}
if (IS_ERR(fence))
return PTR_ERR(fence);
 
if (r)
goto exit_do_move;
r = radeon_fence_wait(fence, false);
if (r)
goto exit_do_move;
radeon_fence_unref(&fence);
if (r)
return r;
}
end_jiffies = jiffies;
return jiffies_to_msecs(end_jiffies - start_jiffies);
r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
exit_do_move:
if (fence)
radeon_fence_unref(&fence);
return r;
}
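
The hunk above tracks the radeon_copy_* API change between the two revisions; a comparative sketch of the two calling conventions (src, dst and npages are placeholders):

struct radeon_fence *fence = NULL;
int r;

/* rev 5271: the copy returns the fence, or ERR_PTR() on failure */
fence = radeon_copy_dma(rdev, src, dst, npages, NULL);
if (IS_ERR(fence))
	return PTR_ERR(fence);

/* rev 5270: an int return code plus a fence out-parameter */
r = radeon_copy_dma(rdev, src, dst, npages, &fence);
if (r)
	return r;

/* either way the caller waits and then drops its reference */
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);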
 
 
96,7 → 100,7
ENTER();
 
n = RADEON_BENCHMARK_ITERATIONS;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj);
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
if (r) {
goto out_cleanup;
}
108,7 → 112,7
if (r) {
goto out_cleanup;
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, NULL, &dobj);
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
if (r) {
goto out_cleanup;
}
/drivers/video/drm/radeon/radeon_combios.c
116,7 → 116,7
CONNECTOR_UNSUPPORTED_LEGACY
};
 
static const int legacy_connector_convert[] = {
const int legacy_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVID,
DRM_MODE_CONNECTOR_VGA,
/drivers/video/drm/radeon/radeon_connectors.c
322,12 → 322,6
}
 
if (!radeon_connector->edid) {
/* don't fetch the edid from the vbios if ddc fails and runpm is
* enabled so we report disconnected.
*/
if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
return;
 
if (rdev->is_atom_bios) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
832,8 → 826,6
static enum drm_connector_status
radeon_lvds_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
enum drm_connector_status ret = connector_status_disconnected;
/drivers/video/drm/radeon/radeon_cs.c
84,18 → 84,21
struct drm_device *ddev = p->rdev->ddev;
struct radeon_cs_chunk *chunk;
struct radeon_cs_buckets buckets;
unsigned i;
bool need_mmap_lock = false;
int r;
unsigned i, j;
bool duplicate;
 
if (p->chunk_relocs == NULL) {
if (p->chunk_relocs_idx == -1) {
return 0;
}
chunk = p->chunk_relocs;
chunk = &p->chunks[p->chunk_relocs_idx];
p->dma_reloc_idx = 0;
/* FIXME: we assume that each reloc uses 4 dwords */
p->nrelocs = chunk->length_dw / 4;
p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
if (p->relocs_ptr == NULL) {
return -ENOMEM;
}
p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
104,17 → 107,31
 
for (i = 0; i < p->nrelocs; i++) {
struct drm_radeon_cs_reloc *r;
struct drm_gem_object *gobj;
unsigned priority;
 
duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
if (gobj == NULL) {
for (j = 0; j < i; j++) {
if (r->handle == p->relocs[j].handle) {
p->relocs_ptr[i] = &p->relocs[j];
duplicate = true;
break;
}
}
if (duplicate) {
p->relocs[i].handle = 0;
continue;
}
 
p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
r->handle);
if (p->relocs[i].gobj == NULL) {
DRM_ERROR("gem object lookup failed 0x%x\n",
r->handle);
return -ENOENT;
}
p->relocs[i].robj = gem_to_radeon_bo(gobj);
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
 
/* The userspace buffer priorities are from 0 to 15. A higher
* number means the buffer is more important.
126,13 → 143,10
+ !!r->write_domain;
 
/* the first reloc of an UVD job is the msg and that must be in
VRAM, but also put everything into VRAM on AGP cards and older
IGP chips to avoid image corruption */
VRAM, but also put everything into VRAM on AGP cards to avoid
image corruption */
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
p->rdev->family == CHIP_RS780 ||
p->rdev->family == CHIP_RS880)) {
 
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
/* TODO: is this still needed for NI+ ? */
p->relocs[i].prefered_domains =
RADEON_GEM_DOMAIN_VRAM;
157,22 → 171,9
domain |= RADEON_GEM_DOMAIN_GTT;
p->relocs[i].allowed_domains = domain;
}
/*
if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
uint32_t domain = p->relocs[i].prefered_domains;
if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
"allowed for userptr BOs\n");
return -EINVAL;
}
need_mmap_lock = true;
domain = RADEON_GEM_DOMAIN_GTT;
p->relocs[i].prefered_domains = domain;
p->relocs[i].allowed_domains = domain;
}
*/
 
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].tv.shared = !r->write_domain;
p->relocs[i].handle = r->handle;
 
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority);
183,15 → 184,8
if (p->cs_flags & RADEON_CS_USE_VM)
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
&p->validated);
// if (need_mmap_lock)
// down_read(&current->mm->mmap_sem);
 
r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
 
// if (need_mmap_lock)
// up_read(&current->mm->mmap_sem);
 
return r;
return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
}
 
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
237,21 → 231,17
return 0;
}
 
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
struct radeon_bo_list *reloc;
int r;
int i;
 
list_for_each_entry(reloc, &p->validated, tv.head) {
struct reservation_object *resv;
for (i = 0; i < p->nrelocs; i++) {
if (!p->relocs[i].robj)
continue;
 
resv = reloc->robj->tbo.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
reloc->tv.shared);
if (r)
return r;
radeon_semaphore_sync_to(p->ib.semaphore,
p->relocs[i].robj->tbo.sync_obj);
}
return 0;
}
 
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
270,11 → 260,13
INIT_LIST_HEAD(&p->validated);
p->idx = 0;
p->ib.sa_bo = NULL;
p->ib.semaphore = NULL;
p->const_ib.sa_bo = NULL;
p->chunk_ib = NULL;
p->chunk_relocs = NULL;
p->chunk_flags = NULL;
p->chunk_const_ib = NULL;
p->const_ib.semaphore = NULL;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
p->chunk_flags_idx = -1;
p->chunk_const_ib_idx = -1;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
301,23 → 293,24
return -EFAULT;
}
p->chunks[i].length_dw = user_chunk.length_dw;
if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs = &p->chunks[i];
p->chunks[i].chunk_id = user_chunk.chunk_id;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
p->chunk_ib = &p->chunks[i];
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
p->chunk_ib_idx = i;
/* zero length IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
p->chunk_const_ib = &p->chunks[i];
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
p->chunk_const_ib_idx = i;
/* zero length CONST IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
p->chunk_flags = &p->chunks[i];
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
p->chunk_flags_idx = i;
/* zero length flags aren't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
326,10 → 319,10
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
p->chunks[i].user_ptr = cdata;
if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
continue;
 
if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
continue;
}
342,7 → 335,7
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
return -EFAULT;
}
if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
p->cs_flags = p->chunks[i].kdata[0];
if (p->chunks[i].length_dw > 1)
ring = p->chunks[i].kdata[1];
383,8 → 376,8
static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct list_head *b)
{
struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
 
/* Sort A before B if A is smaller. */
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
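
This comparator is shaped for the kernel's list_sort(); a hypothetical call site, sorting the validated list so the smallest BOs are moved first:

list_sort(NULL, &parser->validated, cmp_size_smaller_first);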
417,7 → 410,7
 
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
&parser->ib.fence->base);
parser->ib.fence);
} else if (backoff) {
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
425,16 → 418,14
 
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
struct radeon_bo *bo = parser->relocs[i].robj;
if (bo == NULL)
continue;
 
drm_gem_object_unreference_unlocked(&bo->gem_base);
if (parser->relocs[i].gobj)
drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
}
}
kfree(parser->track);
kfree(parser->relocs);
drm_free_large(parser->vm_bos);
kfree(parser->relocs_ptr);
kfree(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
448,7 → 439,7
{
int r;
 
if (parser->chunk_ib == NULL)
if (parser->chunk_ib_idx == -1)
return 0;
 
if (parser->cs_flags & RADEON_CS_USE_VM)
460,13 → 451,6
return r;
}
 
r = radeon_cs_sync_rings(parser);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to sync rings: %i\n", r);
return r;
}
 
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
473,6 → 457,7
(parser->ring == TN_RING_TYPE_VCE2_INDEX))
radeon_vce_note_usage(rdev);
 
radeon_cs_sync_rings(parser);
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
508,6 → 493,10
for (i = 0; i < p->nrelocs; i++) {
struct radeon_bo *bo;
 
/* ignore duplicates */
if (p->relocs_ptr[i] != &p->relocs[i])
continue;
 
bo = p->relocs[i].robj;
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
518,8 → 507,6
r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
 
radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
}
 
return radeon_vm_clear_invalids(rdev, vm);
532,7 → 519,7
struct radeon_vm *vm = &fpriv->vm;
int r;
 
if (parser->chunk_ib == NULL)
if (parser->chunk_ib_idx == -1)
return 0;
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
557,16 → 544,11
if (r) {
goto out;
}
radeon_cs_sync_rings(parser);
radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
 
r = radeon_cs_sync_rings(parser);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to sync rings: %i\n", r);
goto out;
}
 
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib != NULL)) {
(parser->chunk_const_ib_idx != -1)) {
r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
} else {
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
593,7 → 575,7
struct radeon_vm *vm = NULL;
int r;
 
if (parser->chunk_ib == NULL)
if (parser->chunk_ib_idx == -1)
return 0;
 
if (parser->cs_flags & RADEON_CS_USE_VM) {
601,8 → 583,8
vm = &fpriv->vm;
 
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib != NULL)) {
ib_chunk = parser->chunk_const_ib;
(parser->chunk_const_ib_idx != -1)) {
ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
621,13 → 603,13
return -EFAULT;
}
 
ib_chunk = parser->chunk_ib;
ib_chunk = &parser->chunks[parser->chunk_ib_idx];
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
}
}
ib_chunk = parser->chunk_ib;
ib_chunk = &parser->chunks[parser->chunk_ib_idx];
 
r = radeon_ib_get(rdev, parser->ring, &parser->ib,
vm, ib_chunk->length_dw * 4);
712,7 → 694,7
struct radeon_cs_packet *pkt,
unsigned idx)
{
struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
struct radeon_device *rdev = p->rdev;
uint32_t header;
 
806,7 → 788,7
* GPU offset using the provided start.
**/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_bo_list **cs_reloc,
struct radeon_cs_reloc **cs_reloc,
int nomm)
{
struct radeon_cs_chunk *relocs_chunk;
814,12 → 796,12
unsigned idx;
int r;
 
if (p->chunk_relocs == NULL) {
if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
*cs_reloc = NULL;
relocs_chunk = p->chunk_relocs;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
if (r)
return r;
845,6 → 827,6
(u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
} else
*cs_reloc = &p->relocs[(idx / 4)];
*cs_reloc = p->relocs_ptr[(idx / 4)];
return 0;
}
/drivers/video/drm/radeon/radeon_display.c
1537,7 → 1537,7
 
/* In vblank? */
if (in_vbl)
ret |= DRM_SCANOUTPOS_IN_VBLANK;
ret |= DRM_SCANOUTPOS_INVBL;
 
/* Is vpos outside nominal vblank area, but less than
* 1/100 of a frame height away from start of vblank?
/drivers/video/drm/radeon/radeon_fb.c
184,8 → 184,7
static int radeonfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct radeon_fbdev *rfbdev =
container_of(helper, struct radeon_fbdev, helper);
struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
/drivers/video/drm/radeon/radeon_fence.c
29,8 → 29,9
* Dave Airlie
*/
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <asm/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <drm/drmP.h>
110,19 → 111,15
struct radeon_fence **fence,
int ring)
{
u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];
 
/* we are protected by the ring emission mutex */
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
(*fence)->seq = seq;
(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
(*fence)->is_vm_update = false;
fence_init(&(*fence)->base, &radeon_fence_ops,
&rdev->fence_queue.lock, rdev->fence_context + ring, seq);
radeon_fence_ring_emit(rdev, ring, *fence);
trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
return 0;
129,51 → 126,15
}
 
/**
* radeon_fence_check_signaled - callback from fence_queue
* radeon_fence_process - process a fence
*
* This function is called with the fence_queue lock held, which is also
* used for the fence locking itself, so unlocked variants are used for
* fence_signal and remove_wait_queue.
*/
static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
struct radeon_fence *fence;
u64 seq;
 
fence = container_of(wait, struct radeon_fence, fence_wake);
 
/*
* We cannot use radeon_fence_process here because we're already
* in the waitqueue, in a call from wake_up_all.
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
int ret = fence_signal_locked(&fence->base);
 
if (!ret)
FENCE_TRACE(&fence->base, "signaled from irq context\n");
else
FENCE_TRACE(&fence->base, "was already signaled\n");
 
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
// __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
fence_put(&fence->base);
} else
FENCE_TRACE(&fence->base, "pending\n");
return 0;
}
 
/**
* radeon_fence_activity - check for fence activity
*
* @rdev: radeon_device pointer
* @ring: ring index the fence is associated with
*
* Checks the current fence value and calculates the last
* signalled fence value. Returns true if activity occurred
* on the ring, and the fence_queue should be woken up.
* Checks the current fence value and wakes the fence queue
* if the sequence number has increased (all asics).
*/
static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
uint64_t seq, last_seq, last_emitted;
unsigned count_loop = 0;
229,80 → 190,26
}
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 
// if (seq < last_emitted)
// radeon_fence_schedule_check(rdev, ring);
 
return wake;
if (wake)
wake_up_all(&rdev->fence_queue);
}
 
/**
* radeon_fence_check_lockup - check for hardware lockup
* radeon_fence_destroy - destroy a fence
*
* @work: delayed work item
* @kref: fence kref
*
* Checks for fence activity and, if there is none, probes
* the hardware to see whether a lockup occurred.
* Frees the fence object (all asics).
*/
static void radeon_fence_check_lockup(struct work_struct *work)
static void radeon_fence_destroy(struct kref *kref)
{
struct radeon_fence_driver *fence_drv;
struct radeon_device *rdev;
int ring;
struct radeon_fence *fence;
 
fence_drv = container_of(work, struct radeon_fence_driver,
lockup_work.work);
rdev = fence_drv->rdev;
ring = fence_drv - &rdev->fence_drv[0];
 
// if (!down_read_trylock(&rdev->exclusive_lock)) {
// /* just reschedule the check if a reset is going on */
// radeon_fence_schedule_check(rdev, ring);
// return;
// }
 
if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
unsigned long irqflags;
 
fence_drv->delayed_irq = false;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
fence = container_of(kref, struct radeon_fence, kref);
kfree(fence);
}
 
if (radeon_fence_activity(rdev, ring))
wake_up_all(&rdev->fence_queue);
 
else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
 
/* good news we believe it's a lockup */
dev_warn(rdev->dev, "GPU lockup (current fence id "
"0x%016llx last fence id 0x%016llx on ring %d)\n",
(uint64_t)atomic64_read(&fence_drv->last_seq),
fence_drv->sync_seq[ring], ring);
 
/* remember that we need a reset */
rdev->needs_reset = true;
wake_up_all(&rdev->fence_queue);
}
// up_read(&rdev->exclusive_lock);
}
 
/**
* radeon_fence_process - process a fence
*
* @rdev: radeon_device pointer
* @ring: ring index the fence is associated with
*
* Checks the current fence value and wakes the fence queue
* if the sequence number has increased (all asics).
*/
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
if (radeon_fence_activity(rdev, ring))
wake_up_all(&rdev->fence_queue);
}
 
/**
* radeon_fence_seq_signaled - check if a fence sequence number has signaled
*
* @rdev: radeon device pointer
330,78 → 237,7
return false;
}
 
static bool radeon_fence_is_signaled(struct fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
unsigned ring = fence->ring;
u64 seq = fence->seq;
 
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
}
 
// if (down_read_trylock(&rdev->exclusive_lock))
{
radeon_fence_process(rdev, ring);
// up_read(&rdev->exclusive_lock);
 
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
}
}
return false;
}
 
/**
* radeon_fence_enable_signaling - enable signalling on fence
* @fence: fence
*
* This function is called with fence_queue lock held, and adds a callback
* to fence_queue that checks if this fence is signaled, and if so it
* signals the fence and removes itself.
*/
static bool radeon_fence_enable_signaling(struct fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
 
if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
return false;
 
// if (down_read_trylock(&rdev->exclusive_lock))
{
radeon_irq_kms_sw_irq_get(rdev, fence->ring);
 
// if (radeon_fence_activity(rdev, fence->ring))
// wake_up_all_locked(&rdev->fence_queue);
 
/* did fence get signaled after we enabled the sw irq? */
if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
radeon_irq_kms_sw_irq_put(rdev, fence->ring);
// up_read(&rdev->exclusive_lock);
return false;
}
 
// up_read(&rdev->exclusive_lock);
// } else {
/* we're probably in a lockup, let's not fiddle too much */
// if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
// rdev->fence_drv[fence->ring].delayed_irq = true;
// radeon_fence_schedule_check(rdev, fence->ring);
}
 
// fence->fence_wake.flags = 0;
// fence->fence_wake.private = NULL;
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
fence_get(f);
 
FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
 
/**
* radeon_fence_signaled - check if a fence has signaled
*
* @fence: radeon fence object
411,15 → 247,14
*/
bool radeon_fence_signaled(struct radeon_fence *fence)
{
if (!fence)
if (!fence) {
return true;
 
}
if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
return true;
}
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
int ret;
 
ret = fence_signal(&fence->base);
if (!ret)
FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
fence->seq = RADEON_FENCE_SIGNALED_SEQ;
return true;
}
return false;
448,12 → 283,11
}
 
/**
* radeon_fence_wait_seq_timeout - wait for a specific sequence numbers
* radeon_fence_wait_seq - wait for a specific sequence numbers
*
* @rdev: radeon device pointer
* @target_seq: sequence number(s) we want to wait for
* @intr: use interruptible sleep
* @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
*
* Wait for the requested sequence number(s) to be written by any ring
* (all asics). Sequence number array is indexed by ring id.
460,25 → 294,24
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the sequence number. Helper function
* for radeon_fence_wait_*().
* Returns remaining time if the sequence number has passed, 0 when
* the wait timed out, or an error for all other cases.
* Returns 0 if the sequence number has passed, error for all other cases.
* -EDEADLK is returned when a GPU lockup has been detected.
*/
static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
u64 *target_seq, bool intr,
long timeout)
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
bool intr)
{
long r;
int i;
uint64_t last_seq[RADEON_NUM_RINGS];
bool signaled;
int i, r;
 
if (radeon_fence_any_seq_signaled(rdev, target_seq))
return timeout;
while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
 
/* enable IRQs and tracing */
/* Save current sequence values, used to check for GPU lockups */
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
continue;
 
last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
radeon_irq_kms_sw_irq_get(rdev, i);
}
485,17 → 318,14
 
if (intr) {
r = wait_event_interruptible_timeout(rdev->fence_queue, (
radeon_fence_any_seq_signaled(rdev, target_seq)
|| rdev->needs_reset), timeout);
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
|| rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
} else {
r = wait_event_timeout(rdev->fence_queue, (
radeon_fence_any_seq_signaled(rdev, target_seq)
|| rdev->needs_reset), timeout);
(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
|| rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
}
 
if (rdev->needs_reset)
r = -EDEADLK;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
continue;
504,14 → 334,59
trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
}
 
if (unlikely(r < 0))
return r;
 
if (unlikely(!signaled)) {
if (rdev->needs_reset)
return -EDEADLK;
 
/* we were interrupted for some reason and fence
* isn't signaled yet, resume waiting */
if (r)
continue;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
continue;
 
if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
break;
}
 
if (i != RADEON_NUM_RINGS)
continue;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i])
continue;
 
if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
break;
}
 
if (i < RADEON_NUM_RINGS) {
/* good news we believe it's a lockup */
dev_warn(rdev->dev, "GPU lockup (waiting for "
"0x%016llx last fence id 0x%016llx on"
" ring %d)\n",
target_seq[i], last_seq[i], i);
 
/* remember that we need an reset */
rdev->needs_reset = true;
wake_up_all(&rdev->fence_queue);
return -EDEADLK;
}
}
}
return 0;
}
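
The two revisions use different return conventions for this wait helper; a comparative sketch (seq and intr as in the callers below):

/* rev 5271: wait_event_timeout style, remaining jiffies on success,
 * 0 on timeout, negative (-EDEADLK, -ERESTARTSYS, ...) on error */
long t = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
if (t < 0)
	return t;

/* rev 5270: plain 0 / -errno convention */
int r = radeon_fence_wait_seq(rdev, seq, intr);
if (r)
	return r;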
 
/**
* radeon_fence_wait - wait for a fence to signal
*
* @fence: radeon fence object
* @intr: use interruptible sleep
* @intr: use interruptable sleep
*
* Wait for the requested fence to signal (all asics).
* @intr selects whether to use interruptible (true) or non-interruptible
521,26 → 396,22
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
int r;
 
/*
* This function should not be called on !radeon fences.
* If this is the case, it would mean this function can
* also be called on radeon fences belonging to another card.
* exclusive_lock is not held in that case.
*/
if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
return fence_wait(&fence->base, intr);
if (fence == NULL) {
WARN(1, "Querying an invalid fence : %p !\n", fence);
return -EINVAL;
}
 
seq[fence->ring] = fence->seq;
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
return 0;
 
r = radeon_fence_wait_seq(fence->rdev, seq, intr);
if (r)
return r;
}
 
r = fence_signal(&fence->base);
if (!r)
FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
fence->seq = RADEON_FENCE_SIGNALED_SEQ;
return 0;
}
 
563,7 → 434,7
{
uint64_t seq[RADEON_NUM_RINGS];
unsigned i, num_rings = 0;
long r;
int r;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
seq[i] = 0;
574,6 → 445,10
 
seq[i] = fences[i]->seq;
++num_rings;
 
/* test if something was already signaled */
if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
return 0;
}
 
/* nothing to wait for ? */
580,8 → 455,8
if (num_rings == 0)
return -ENOENT;
 
r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
r = radeon_fence_wait_seq(rdev, seq, intr);
if (r) {
return r;
}
return 0;
600,7 → 475,6
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
 
seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
608,10 → 482,7
already the last emitted fence */
return -ENOENT;
}
r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
return 0;
return radeon_fence_wait_seq(rdev, seq, false);
}
 
/**
627,18 → 498,18
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
int r;
 
seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
if (!seq[ring])
return 0;
 
r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
r = radeon_fence_wait_seq(rdev, seq, false);
if (r) {
if (r == -EDEADLK)
return -EDEADLK;
 
dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
ring, r);
}
return 0;
654,7 → 525,7
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
fence_get(&fence->base);
kref_get(&fence->kref);
return fence;
}
 
671,7 → 542,7
 
*fence = NULL;
if (tmp) {
fence_put(&tmp->base);
kref_put(&tmp->kref, radeon_fence_destroy);
}
}
 
840,9 → 711,6
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
rdev->fence_drv[ring].initialized = false;
INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
radeon_fence_check_lockup);
rdev->fence_drv[ring].rdev = rdev;
}
 
/**
890,7 → 758,7
r = radeon_fence_wait_empty(rdev, ring);
if (r) {
/* no need to trigger GPU reset as we are unloading */
radeon_fence_driver_force_completion(rdev, ring);
radeon_fence_driver_force_completion(rdev);
}
wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
903,14 → 771,17
* radeon_fence_driver_force_completion - force all fence waiters to complete
*
* @rdev: radeon device pointer
* @ring: the ring to complete
*
* In case of GPU reset failure, make sure no process keeps waiting on a
* fence that will never complete.
*/
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
if (rdev->fence_drv[ring].initialized) {
int ring;
 
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
}
}
962,7 → 833,6
down_read(&rdev->exclusive_lock);
seq_printf(m, "%d\n", rdev->needs_reset);
rdev->needs_reset = true;
wake_up_all(&rdev->fence_queue);
up_read(&rdev->exclusive_lock);
 
return 0;
982,72 → 852,3
return 0;
#endif
}
 
static const char *radeon_fence_get_driver_name(struct fence *fence)
{
return "radeon";
}
 
static const char *radeon_fence_get_timeline_name(struct fence *f)
{
struct radeon_fence *fence = to_radeon_fence(f);
switch (fence->ring) {
case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
default: WARN_ON_ONCE(1); return "radeon.unk";
}
}
 
static inline bool radeon_test_signaled(struct radeon_fence *fence)
{
return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
}
 
static signed long radeon_fence_default_wait(struct fence *f, bool intr,
signed long t)
{
struct radeon_fence *fence = to_radeon_fence(f);
struct radeon_device *rdev = fence->rdev;
bool signaled;
 
fence_enable_sw_signaling(&fence->base);
 
/*
* This function has to return -EDEADLK, but cannot hold
* exclusive_lock during the wait because some callers
* may already hold it. This means checking needs_reset without
* lock, and not fiddling with any gpu internals.
*
* The callback installed with fence_enable_sw_signaling will
* run before our wait_event_*timeout call, so we will see
* both the signaled fence and the changes to needs_reset.
*/
 
if (intr)
t = wait_event_interruptible_timeout(rdev->fence_queue,
((signaled = radeon_test_signaled(fence)) ||
rdev->needs_reset), t);
else
t = wait_event_timeout(rdev->fence_queue,
((signaled = radeon_test_signaled(fence)) ||
rdev->needs_reset), t);
 
if (t > 0 && !signaled)
return -EDEADLK;
return t;
}
 
const struct fence_ops radeon_fence_ops = {
.get_driver_name = radeon_fence_get_driver_name,
.get_timeline_name = radeon_fence_get_timeline_name,
.enable_signaling = radeon_fence_enable_signaling,
.signaled = radeon_fence_is_signaled,
.wait = radeon_fence_default_wait,
.release = NULL,
};
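
These callbacks plug radeon fences into the cross-driver struct fence API; an illustrative dispatch, assuming the fence_wait() helper from the 3.17-era linux/fence.h:

static long example_wait(struct radeon_fence *rf)
{
	/* ends up in radeon_fence_default_wait via the table above */
	return fence_wait(&rf->base, true);
}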
/drivers/video/drm/radeon/radeon_gart.c
137,7 → 137,7
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
0, NULL, NULL, &rdev->gart.robj);
0, NULL, &rdev->gart.robj);
if (r) {
return r;
}
/drivers/video/drm/radeon/radeon_gem.c
65,7 → 65,7
 
retry:
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
flags, NULL, NULL, &robj);
flags, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
91,7 → 91,7
{
struct radeon_bo *robj;
uint32_t domain;
long r;
int r;
 
/* FIXME: reimplement */
robj = gem_to_radeon_bo(gobj);
229,10 → 229,9
return r;
}
 
static int radeon_mode_mmap(struct drm_file *filp,
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, bool dumb,
uint64_t *offset_p)
uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
241,14 → 240,6
if (gobj == NULL) {
return -ENOENT;
}
 
/*
* We don't allow dumb mmaps on objects created using another
* interface.
*/
WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
"Illegal dumb map of GPU buffer.\n");
 
robj = gem_to_radeon_bo(gobj);
*offset_p = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
260,8 → 251,7
{
struct drm_radeon_gem_mmap *args = data;
 
return radeon_mode_mmap(filp, dev, args->handle, false,
&args->addr_ptr);
return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
 
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
293,9 → 283,8
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r = 0;
int r;
uint32_t cur_placement = 0;
long ret;
 
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
/drivers/video/drm/radeon/radeon_ib.c
64,7 → 64,10
return r;
}
 
radeon_sync_create(&ib->sync);
r = radeon_semaphore_create(rdev, &ib->semaphore);
if (r) {
return r;
}
 
ib->ring = ring;
ib->fence = NULL;
93,7 → 96,7
*/
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
radeon_sync_free(rdev, &ib->sync, ib->fence);
radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
142,11 → 145,11
if (ib->vm) {
struct radeon_fence *vm_id_fence;
vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
radeon_sync_fence(&ib->sync, vm_id_fence);
radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
}
 
/* sync with other rings */
r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
154,12 → 157,11
}
 
if (ib->vm)
radeon_vm_flush(rdev, ib->vm, ib->ring,
ib->sync.last_vm_update);
radeon_vm_flush(rdev, ib->vm, ib->ring);
 
if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
radeon_sync_free(rdev, &const_ib->sync, NULL);
radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
}
radeon_ring_ib_execute(rdev, ib->ring, ib);
r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
267,7 → 269,6
 
r = radeon_ib_test(rdev, i, ring);
if (r) {
radeon_fence_driver_force_completion(rdev, i);
ring->ready = false;
rdev->needs_reset = false;
 
/drivers/video/drm/radeon/radeon_irq_kms.c
206,21 → 206,6
}
 
/**
* radeon_irq_kms_sw_irq_get_delayed - enable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to enable
*
* Enables the software interrupt for a specific ring (all asics).
* The software interrupt is generally used to signal a fence on
* a particular ring.
*/
bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
{
return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
}
 
/**
* radeon_irq_kms_sw_irq_put - disable software interrupt
*
* @rdev: radeon device pointer
/drivers/video/drm/radeon/radeon_mode.h
321,10 → 321,6
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
int cursor_x;
int cursor_y;
int cursor_hot_x;
int cursor_hot_y;
int cursor_width;
int cursor_height;
int max_cursor_width;
466,7 → 462,6
u8 id;
u32 reg;
u32 mask;
u32 shift;
};
 
struct radeon_hpd {
753,8 → 748,6
extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock);
extern struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
u8 id);
 
extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint64_t freq,
784,7 → 777,6
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
extern bool radeon_encoder_is_digital(struct drm_encoder *encoder);
 
extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
809,16 → 801,13
extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic);
extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
uint32_t width,
uint32_t height,
int32_t hot_x,
int32_t hot_y);
uint32_t height);
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
extern void radeon_cursor_reset(struct drm_crtc *crtc);
 
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
unsigned int flags,
/drivers/video/drm/radeon/radeon_object.c
96,83 → 96,40
{
u32 c = 0, i;
 
rbo->placement.fpfn = 0;
rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM) {
/* Try placing BOs which don't need CPU access outside of the
* CPU accessible part of VRAM
*/
if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
rbo->placements[c].fpfn =
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
if (domain & RADEON_GEM_DOMAIN_VRAM)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
}
 
rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
}
 
if (domain & RADEON_GEM_DOMAIN_GTT) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_TT;
 
rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
(rbo->rdev->flags & RADEON_IS_AGP)) {
rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_TT;
} else {
rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_TT;
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
}
}
 
if (domain & RADEON_GEM_DOMAIN_CPU) {
if (rbo->flags & RADEON_GEM_GTT_UC) {
rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_SYSTEM;
 
rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
rbo->rdev->flags & RADEON_IS_AGP) {
rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_WC |
TTM_PL_FLAG_UNCACHED |
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_SYSTEM;
} else {
rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_SYSTEM;
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
}
}
if (!c) {
rbo->placements[c].fpfn = 0;
rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
TTM_PL_FLAG_SYSTEM;
}
 
if (!c)
rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
 
for (i = 0; i < c; ++i) {
if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
(rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!rbo->placements[i].fpfn)
rbo->placements[i].lpfn =
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
else
rbo->placements[i].lpfn = 0;
}
 
/*
* Use two-ended allocation depending on the buffer size to
* improve fragmentation quality.
180,16 → 137,14
*/
if (rbo->tbo.mem.size > 512 * 1024) {
for (i = 0; i < c; i++) {
rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
}
}
}
 
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel,
u32 domain, u32 flags, struct sg_table *sg,
struct reservation_object *resv,
struct radeon_bo **bo_ptr)
unsigned long size, int byte_align, bool kernel, u32 domain,
u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
232,12 → 187,11
if (!(rdev->flags & RADEON_IS_PCIE))
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 
#ifdef CONFIG_X86_32
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
* See https://bugs.freedesktop.org/show_bug.cgi?id=84627
*/
// printf("%s rdev->flags %x bo->flags %x\n",
// __FUNCTION__, bo->flags);
 
if(flags & RADEON_GEM_GTT_WC)
bo->flags &= ~RADEON_GEM_GTT_WC;
#endif
 
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
244,7 → 198,7
// down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &radeon_ttm_bo_destroy);
acc_size, sg, &radeon_ttm_bo_destroy);
// up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
335,19 → 289,21
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
if (domain == RADEON_GEM_DOMAIN_VRAM) {
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
(!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
bo->placements[i].lpfn =
bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
else
bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
}
if (max_offset) {
u64 lpfn = max_offset >> PAGE_SHIFT;
 
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
if (!bo->placement.lpfn)
bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
 
if (lpfn < bo->placement.lpfn)
bo->placement.lpfn = lpfn;
}
 
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
379,10 → 335,8
bo->pin_count--;
if (bo->pin_count)
return 0;
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
468,29 → 422,24
struct ww_acquire_ctx *ticket,
struct list_head *head, int ring)
{
struct radeon_bo_list *lobj;
struct list_head duplicates;
struct radeon_cs_reloc *lobj;
struct radeon_bo *bo;
int r;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
INIT_LIST_HEAD(&duplicates);
r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
r = ttm_eu_reserve_buffers(ticket, head);
if (unlikely(r != 0)) {
return r;
}
 
list_for_each_entry(lobj, head, tv.head) {
struct radeon_bo *bo = lobj->robj;
bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
WARN_ONCE(bo->gem_base.dumb,
"GPU use of dumb buffer is illegal.\n");
 
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
*
499,7 → 448,7
* into account. We don't want to disallow buffer moves
* completely.
*/
if ((allowed & current_domain) != 0 &&
if ((lobj->allowed_domains & current_domain) != 0 &&
(domain & current_domain) == 0 && /* will be moved */
bytes_moved > bytes_moved_threshold) {
/* don't move it */
509,7 → 458,7
retry:
radeon_ttm_placement_from_domain(bo, domain);
if (ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_force_into_uvd_segment(bo, allowed);
radeon_uvd_force_into_uvd_segment(bo);
 
initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
529,12 → 478,6
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
}
 
list_for_each_entry(lobj, &duplicates, tv.head) {
lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
lobj->tiling_flags = lobj->robj->tiling_flags;
}
 
return 0;
}
 
735,29 → 678,12
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0))
return r;
spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
 
if (bo->tbo.sync_obj)
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
}
 
/**
* radeon_bo_fence - add fence to buffer object
*
* @bo: buffer object in question
* @fence: fence to add
* @shared: true if fence should be added shared
*
*/
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
 
if (shared)
reservation_object_add_shared_fence(resv, &fence->base);
else
reservation_object_add_excl_fence(resv, &fence->base);
}
/drivers/video/drm/radeon/radeon_object.h
126,7 → 126,6
unsigned long size, int byte_align,
bool kernel, u32 domain, u32 flags,
struct sg_table *sg,
struct reservation_object *resv,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
155,8 → 154,6
struct ttm_mem_reg *new_mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
bool shared);
 
/*
* sub allocation
/drivers/video/drm/radeon/radeon_pm.c
1479,7 → 1479,7
if (rdev->pm.active_crtcs & (1 << crtc)) {
vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
!(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
!(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
}
}
/drivers/video/drm/radeon/radeon_ring.c
45,6 → 45,27
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
 
/**
* radeon_ring_write - write a value to the ring
*
* @ring: radeon_ring structure holding ring information
* @v: dword (dw) value to write
*
* Write a value to the requested ring buffer (all asics).
*/
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
if (ring->count_dw <= 0) {
DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
}
#endif
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}
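
A typical (hypothetical) emission sequence around this helper; the radeon_ring_lock/radeon_ring_unlock_commit pairing and the PACKET0 encoding are assumed from elsewhere in the driver, and the trailing bool of unlock_commit is the newer hdp_flush parameter (older trees take two arguments):

r = radeon_ring_lock(rdev, ring, 2);      /* reserve two dwords */
if (r)
	return r;
radeon_ring_write(ring, PACKET0(reg, 0)); /* register write packet */
radeon_ring_write(ring, val);
radeon_ring_unlock_commit(rdev, ring, false);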
 
/**
* radeon_ring_supports_scratch_reg - check if the ring supports
* writing to scratch registers
*
383,7 → 404,7
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0, NULL,
RADEON_GEM_DOMAIN_GTT, 0,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
/drivers/video/drm/radeon/radeon_sa.c
65,7 → 65,7
}
 
r = radeon_bo_create(rdev, size, align, true,
domain, flags, NULL, NULL, &sa_manager->bo);
domain, flags, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
/drivers/video/drm/radeon/radeon_test.c
67,7 → 67,7
}
 
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
0, NULL, NULL, &vram_obj);
0, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
87,8 → 87,7
struct radeon_fence *fence = NULL;
 
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
gtt_obj + i);
RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
117,16 → 116,11
radeon_bo_kunmap(gtt_obj[i]);
 
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
NULL);
r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
else
fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
NULL);
if (IS_ERR(fence)) {
r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
r = PTR_ERR(fence);
goto out_lclean_unpin;
}
 
168,16 → 162,11
radeon_bo_kunmap(vram_obj);
 
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
NULL);
r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
else
fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
NULL);
if (IS_ERR(fence)) {
r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
r = PTR_ERR(fence);
goto out_lclean_unpin;
}
 
233,7 → 222,7
radeon_bo_unreserve(gtt_obj[i]);
radeon_bo_unref(&gtt_obj[i]);
}
if (fence && !IS_ERR(fence))
if (fence)
radeon_fence_unref(&fence);
break;
}
/drivers/video/drm/radeon/radeon_ttm.c
166,15 → 166,12
static void radeon_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
static struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
};
 
struct radeon_bo *rbo;
static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 
if (!radeon_ttm_bo_is_radeon_bo(bo)) {
placement->fpfn = 0;
placement->lpfn = 0;
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
184,32 → 181,9
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
int i;
 
/* Try evicting to the CPU inaccessible part of VRAM
* first, but only set GTT as busy placement, so this
* BO will be evicted to GTT rather than causing other
* BOs to be evicted from VRAM
*/
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
RADEON_GEM_DOMAIN_GTT);
rbo->placement.num_busy_placement = 0;
for (i = 0; i < rbo->placement.num_placement; i++) {
if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
if (rbo->placements[0].fpfn < fpfn)
rbo->placements[0].fpfn = fpfn;
} else {
rbo->placement.busy_placement =
&rbo->placements[i];
rbo->placement.num_busy_placement = 1;
}
}
} else
else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
break;
case TTM_PL_TT:
242,7 → 216,6
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
unsigned num_pages;
int r, ridx;
 
rdev = radeon_get_rdev(bo->bdev);
279,12 → 252,13
 
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
if (IS_ERR(fence))
return PTR_ERR(fence);
 
r = ttm_bo_move_accel_cleanup(bo, &fence->base,
/* sync other rings */
fence = bo->sync_obj;
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
&fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
298,7 → 272,7
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_place placements;
u32 placements;
struct ttm_placement placement;
int r;
 
305,13 → 279,13
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.fpfn = 0;
placement.lpfn = 0;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
346,19 → 320,19
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
u32 placements;
int r;
 
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.fpfn = 0;
placement.lpfn = 0;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
497,6 → 471,31
{
}
 
static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}
 
static int radeon_sync_obj_flush(void *sync_obj)
{
return 0;
}
 
static void radeon_sync_obj_unref(void **sync_obj)
{
radeon_fence_unref((struct radeon_fence **)sync_obj);
}
 
static void *radeon_sync_obj_ref(void *sync_obj)
{
return radeon_fence_ref((struct radeon_fence *)sync_obj);
}
 
static bool radeon_sync_obj_signaled(void *sync_obj)
{
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
 
/*
* TTM backend functions.
*/
504,10 → 503,6
struct ttm_dma_tt ttm;
struct radeon_device *rdev;
u64 offset;
 
uint64_t userptr;
struct mm_struct *usermm;
uint32_t userflags;
};
 
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
585,17 → 580,10
return &gtt->ttm.ttm;
}
 
static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
if (!ttm || ttm->func != &radeon_backend_func)
return NULL;
return (struct radeon_ttm_tt *)ttm;
}
 
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
640,7 → 628,7
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
675,6 → 663,11
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
.sync_obj_signaled = &radeon_sync_obj_signaled,
.sync_obj_wait = &radeon_sync_obj_wait,
.sync_obj_flush = &radeon_sync_obj_flush,
.sync_obj_unref = &radeon_sync_obj_unref,
.sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
// .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
711,7 → 704,7
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 
r = radeon_bo_create(rdev, 16 * 1024 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
RADEON_GEM_DOMAIN_VRAM, 0,
NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
/drivers/video/drm/radeon/radeon_uvd.c
46,9 → 46,6
#define FIRMWARE_TAHITI "radeon/TAHITI_uvd.bin"
#define FIRMWARE_BONAIRE "radeon/BONAIRE_uvd.bin"
 
MODULE_FIRMWARE(FIRMWARE_R600);
MODULE_FIRMWARE(FIRMWARE_RS780);
MODULE_FIRMWARE(FIRMWARE_RV770);
MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
118,11 → 115,9
}
 
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
RADEON_GPU_PAGE_SIZE;
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &rdev->uvd.vcpu_bo);
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
236,32 → 231,12
return 0;
}
 
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
uint32_t allowed_domains)
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
int i;
 
for (i = 0; i < rbo->placement.num_placement; ++i) {
rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
rbo->placement.fpfn = 0 >> PAGE_SHIFT;
rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}
 
/* If it must be in VRAM it must be in the first segment as well */
if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
return;
 
/* abort if we already have more than one placement */
if (rbo->placement.num_placement > 1)
return;
 
/* add another 256MB segment */
rbo->placements[1] = rbo->placements[0];
rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
rbo->placement.num_placement++;
rbo->placement.num_busy_placement++;
}
 
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
int i, r;
381,7 → 356,6
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
struct fence *f;
void *ptr;
 
int i, r;
391,9 → 365,8
return -EINVAL;
}
 
f = reservation_object_get_excl(bo->tbo.resv);
if (f) {
r = radeon_fence_wait((struct radeon_fence *)f, false);
if (bo->tbo.sync_obj) {
r = radeon_fence_wait(bo->tbo.sync_obj, false);
if (r) {
DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
return r;
468,12 → 441,12
unsigned buf_sizes[], bool *has_msg_cmd)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
unsigned idx, cmd, offset;
uint64_t start, end;
int r;
 
relocs_chunk = p->chunk_relocs;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
offset = radeon_get_ib_value(p, data0);
idx = radeon_get_ib_value(p, data1);
if (idx >= relocs_chunk->length_dw) {
482,7 → 455,7
return -EINVAL;
}
 
reloc = &p->relocs[(idx / 4)];
reloc = p->relocs_ptr[(idx / 4)];
start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj);
start += offset;
590,13 → 563,13
[0x00000003] = 2048,
};
 
if (p->chunk_ib->length_dw % 16) {
if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
p->chunk_ib->length_dw);
p->chunks[p->chunk_ib_idx].length_dw);
return -EINVAL;
}
 
if (p->chunk_relocs == NULL) {
if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
620,7 → 593,7
DRM_ERROR("Unknown packet type %d !\n", pkt.type);
return -EINVAL;
}
} while (p->idx < p->chunk_ib->length_dw);
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 
if (!has_msg_cmd) {
DRM_ERROR("UVD-IBs need a msg command!\n");
631,16 → 604,38
}
 
static int radeon_uvd_send_msg(struct radeon_device *rdev,
int ring, uint64_t addr,
int ring, struct radeon_bo *bo,
struct radeon_fence **fence)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
struct radeon_ib ib;
uint64_t addr;
int i, r;
 
r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
memset(&tv, 0, sizeof(tv));
tv.bo = &bo->tbo;
 
INIT_LIST_HEAD(&head);
list_add(&tv.head, &head);
 
r = ttm_eu_reserve_buffers(&ticket, &head);
if (r)
return r;
 
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
radeon_uvd_force_into_uvd_segment(bo);
 
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
goto err;
 
r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
if (r)
goto err;
 
addr = radeon_bo_gpu_offset(bo);
ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
ib.ptr[1] = addr;
ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
652,11 → 647,19
ib.length_dw = 16;
 
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r)
goto err;
ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
 
if (fence)
*fence = radeon_fence_ref(ib.fence);
 
radeon_ib_free(rdev, &ib);
radeon_bo_unref(&bo);
return 0;
 
err:
ttm_eu_backoff_reservation(&ticket, &head);
return r;
}
 
666,19 → 669,28
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence)
{
/* we use the last page of the vcpu bo for the UVD message */
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
RADEON_GPU_PAGE_SIZE;
 
uint32_t *msg = rdev->uvd.cpu_addr + offs;
uint64_t addr = rdev->uvd.gpu_addr + offs;
 
struct radeon_bo *bo;
uint32_t *msg;
int r, i;
 
r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
 
r = radeon_bo_reserve(bo, false);
if (r) {
radeon_bo_unref(&bo);
return r;
}
 
r = radeon_bo_kmap(bo, (void **)&msg);
if (r) {
radeon_bo_unreserve(bo);
radeon_bo_unref(&bo);
return r;
}
 
/* stitch together a UVD create msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000000);
694,27 → 706,37
for (i = 11; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
 
r = radeon_uvd_send_msg(rdev, ring, addr, fence);
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
return r;
radeon_bo_kunmap(bo);
radeon_bo_unreserve(bo);
 
return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
 
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence)
{
/* we use the last page of the vcpu bo for the UVD message */
uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
RADEON_GPU_PAGE_SIZE;
 
uint32_t *msg = rdev->uvd.cpu_addr + offs;
uint64_t addr = rdev->uvd.gpu_addr + offs;
 
struct radeon_bo *bo;
uint32_t *msg;
int r, i;
 
r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
 
r = radeon_bo_reserve(bo, false);
if (r) {
radeon_bo_unref(&bo);
return r;
}
 
r = radeon_bo_kmap(bo, (void **)&msg);
if (r) {
radeon_bo_unreserve(bo);
radeon_bo_unref(&bo);
return r;
}
 
/* stitch together a UVD destroy msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000002);
723,9 → 745,10
for (i = 4; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
 
r = radeon_uvd_send_msg(rdev, ring, addr, fence);
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
return r;
radeon_bo_kunmap(bo);
radeon_bo_unreserve(bo);
 
return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
 
/**
/drivers/video/drm/radeon/radeon_vce.c
126,8 → 126,7
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL,
&rdev->vce.vcpu_bo);
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
453,11 → 452,11
unsigned size)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_bo_list *reloc;
struct radeon_cs_reloc *reloc;
uint64_t start, end, offset;
unsigned idx;
 
relocs_chunk = p->chunk_relocs;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
offset = radeon_get_ib_value(p, lo);
idx = radeon_get_ib_value(p, hi);
 
467,7 → 466,7
return -EINVAL;
}
 
reloc = &p->relocs[(idx / 4)];
reloc = p->relocs_ptr[(idx / 4)];
start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj);
start += offset;
534,7 → 533,7
uint32_t *size = &tmp;
int i, r;
 
while (p->idx < p->chunk_ib->length_dw) {
while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
uint32_t len = radeon_get_ib_value(p, p->idx);
uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
 
/drivers/video/drm/radeon/radeon_vm.c
125,25 → 125,26
* Add the page directory to the list of BOs to
* validate for command submission (cayman+).
*/
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head)
{
struct radeon_bo_list *list;
struct radeon_cs_reloc *list;
unsigned i, idx;
 
list = kmalloc_array(vm->max_pde_used + 2,
sizeof(struct radeon_bo_list), GFP_KERNEL);
sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (!list)
return NULL;
 
/* add the vm page table to the list */
list[0].gobj = NULL;
list[0].robj = vm->page_directory;
list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
list[0].tv.shared = true;
list[0].tiling_flags = 0;
list[0].handle = 0;
list_add(&list[0].tv.head, head);
 
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
150,12 → 151,13
if (!vm->page_tables[i].bo)
continue;
 
list[idx].gobj = NULL;
list[idx].robj = vm->page_tables[i].bo;
list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
list[idx].tv.shared = true;
list[idx].tiling_flags = 0;
list[idx].handle = 0;
list_add(&list[idx++].tv.head, head);
}
 
178,18 → 180,15
struct radeon_vm *vm, int ring)
{
struct radeon_fence *best[RADEON_NUM_RINGS] = {};
struct radeon_vm_id *vm_id = &vm->ids[ring];
 
unsigned choices[2] = {};
unsigned i;
 
/* check if the id is still valid */
if (vm_id->id && vm_id->last_id_use &&
vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
return NULL;
 
/* we definitely need to flush */
vm_id->pd_gpu_addr = ~0ll;
radeon_fence_unref(&vm->last_flush);
 
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < rdev->vm_manager.nvm; ++i) {
197,8 → 196,8
 
if (fence == NULL) {
/* found a free one */
vm_id->id = i;
trace_radeon_vm_grab_id(i, ring);
vm->id = i;
trace_radeon_vm_grab_id(vm->id, ring);
return NULL;
}
 
210,8 → 209,8
 
for (i = 0; i < 2; ++i) {
if (choices[i]) {
vm_id->id = choices[i];
trace_radeon_vm_grab_id(choices[i], ring);
vm->id = choices[i];
trace_radeon_vm_grab_id(vm->id, ring);
return rdev->vm_manager.active[choices[i]];
}
}
227,7 → 226,6
* @rdev: radeon_device pointer
* @vm: vm we want to flush
* @ring: ring to use for flush
* @updates: last vm update that is waited for
*
* Flush the vm (cayman+).
*
235,21 → 233,15
*/
void radeon_vm_flush(struct radeon_device *rdev,
struct radeon_vm *vm,
int ring, struct radeon_fence *updates)
int ring)
{
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
struct radeon_vm_id *vm_id = &vm->ids[ring];
 
if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {
 
trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
radeon_fence_unref(&vm_id->flushed_updates);
vm_id->flushed_updates = radeon_fence_ref(updates);
vm_id->pd_gpu_addr = pd_addr;
radeon_ring_vm_flush(rdev, &rdev->ring[ring],
vm_id->id, vm_id->pd_gpu_addr);
 
/* if we can't remember our last VM flush then flush now! */
if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
trace_radeon_vm_flush(pd_addr, ring, vm->id);
vm->pd_gpu_addr = pd_addr;
radeon_ring_vm_flush(rdev, ring, vm);
}
}
 
269,13 → 261,18
struct radeon_vm *vm,
struct radeon_fence *fence)
{
unsigned vm_id = vm->ids[fence->ring].id;
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(fence);
 
radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
 
radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
radeon_fence_unref(&vm->last_id_use);
vm->last_id_use = radeon_fence_ref(fence);
 
/* we just flushed the VM, remember that */
if (!vm->last_flush)
vm->last_flush = radeon_fence_ref(fence);
}
 
/**
388,18 → 385,27
static int radeon_vm_clear_bo(struct radeon_device *rdev,
struct radeon_bo *bo)
{
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
struct radeon_ib ib;
unsigned entries;
uint64_t addr;
int r;
 
r = radeon_bo_reserve(bo, false);
memset(&tv, 0, sizeof(tv));
tv.bo = &bo->tbo;
 
INIT_LIST_HEAD(&head);
list_add(&tv.head, &head);
 
r = ttm_eu_reserve_buffers(&ticket, &head);
if (r)
return r;
 
r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
if (r)
goto error_unreserve;
goto error;
 
addr = radeon_bo_gpu_offset(bo);
entries = radeon_bo_size(bo) / 8;
406,7 → 412,7
 
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
if (r)
goto error_unreserve;
goto error;
 
ib.length_dw = 0;
 
416,16 → 422,15
 
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r)
goto error_free;
goto error;
 
ib.fence->is_vm_update = true;
radeon_bo_fence(bo, ib.fence, false);
 
error_free:
ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
radeon_ib_free(rdev, &ib);
 
error_unreserve:
radeon_bo_unreserve(bo);
return 0;
 
error:
ttm_eu_backoff_reservation(&ticket, &head);
return r;
}
 
441,7 → 446,7
* Validate and set the offset requested within the vm address space.
* Returns 0 for success, error for failure.
*
* Object has to be reserved and gets unreserved by this function!
* Object has to be reserved!
*/
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
487,9 → 492,7
tmp->vm = vm;
tmp->addr = bo_va->addr;
tmp->bo = radeon_bo_ref(bo_va->bo);
spin_lock(&vm->status_lock);
list_add(&tmp->vm_status, &vm->freed);
spin_unlock(&vm->status_lock);
}
 
interval_tree_remove(&bo_va->it, &vm->va);
542,8 → 545,7
 
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0,
NULL, NULL, &pt);
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
if (r)
return r;
 
569,7 → 571,7
}
 
mutex_unlock(&vm->mutex);
return 0;
return radeon_bo_reserve(bo_va->bo, false);
}
 
/**
692,8 → 694,8
 
if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
 
radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
700,8 → 702,9
radeon_ib_free(rdev, &ib);
return r;
}
ib.fence->is_vm_update = true;
radeon_bo_fence(pd, ib.fence, false);
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(ib.fence);
radeon_fence_unref(&vm->last_flush);
}
radeon_ib_free(rdev, &ib);
 
800,7 → 803,7
*
* Global and local mutex must be locked!
*/
static int radeon_vm_update_ptes(struct radeon_device *rdev,
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_ib *ib,
uint64_t start, uint64_t end,
817,12 → 820,8
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
unsigned nptes;
uint64_t pte;
int r;
 
radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
r = reservation_object_reserve_shared(pt->tbo.resv);
if (r)
return r;
radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
 
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
856,36 → 855,9
last_pte + 8 * count,
last_dst, flags);
}
 
return 0;
}
 
/**
* radeon_vm_fence_pts - fence page tables after an update
*
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @fence: fence to use
*
* Fence the page tables in the range @start - @end (cayman+).
*
* Global and local mutex must be locked!
*/
static void radeon_vm_fence_pts(struct radeon_vm *vm,
uint64_t start, uint64_t end,
struct radeon_fence *fence)
{
unsigned i;
 
start >>= radeon_vm_block_size;
end >>= radeon_vm_block_size;
 
for (i = start; i <= end; ++i)
radeon_bo_fence(vm->page_tables[i].bo, fence, true);
}
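 
/* Worked example, assuming the common 9-bit block size (512 PTEs per
 * page table): fencing PFNs 0x200..0x5ff shifts down to table indices
 * 1..2, so exactly those two page-table BOs receive the shared fence. */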
 
/**
* radeon_vm_bo_update - map a bo into the vm page table
*
* @rdev: radeon_device pointer
915,16 → 887,11
return -EINVAL;
}
 
spin_lock(&vm->status_lock);
list_del_init(&bo_va->vm_status);
spin_unlock(&vm->status_lock);
 
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
// if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
// bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
 
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
986,34 → 953,23
return r;
ib.length_dw = 0;
 
if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
unsigned i;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i)
radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
}
 
r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
bo_va->it.last + 1, addr,
radeon_vm_page_flags(bo_va->flags));
if (r) {
radeon_ib_free(rdev, &ib);
return r;
}
 
radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > ndw);
 
radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
radeon_ib_free(rdev, &ib);
return r;
}
ib.fence->is_vm_update = true;
radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
radeon_fence_unref(&bo_va->last_pt_update);
bo_va->last_pt_update = radeon_fence_ref(ib.fence);
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(ib.fence);
radeon_ib_free(rdev, &ib);
radeon_fence_unref(&vm->last_flush);
 
return 0;
}
1032,25 → 988,16
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va;
struct radeon_bo_va *bo_va, *tmp;
int r;
 
spin_lock(&vm->status_lock);
while (!list_empty(&vm->freed)) {
bo_va = list_first_entry(&vm->freed,
struct radeon_bo_va, vm_status);
spin_unlock(&vm->status_lock);
 
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
r = radeon_vm_bo_update(rdev, bo_va, NULL);
radeon_bo_unref(&bo_va->bo);
radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
if (r)
return r;
 
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
return 0;
 
}
1069,23 → 1016,14
int radeon_vm_clear_invalids(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va;
struct radeon_bo_va *bo_va, *tmp;
int r;
 
spin_lock(&vm->status_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated,
struct radeon_bo_va, vm_status);
spin_unlock(&vm->status_lock);
 
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
r = radeon_vm_bo_update(rdev, bo_va, NULL);
if (r)
return r;
 
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
 
return 0;
}
 
1108,7 → 1046,6
 
mutex_lock(&vm->mutex);
interval_tree_remove(&bo_va->it, &vm->va);
spin_lock(&vm->status_lock);
list_del(&bo_va->vm_status);
 
if (bo_va->addr) {
1115,10 → 1052,8
bo_va->bo = radeon_bo_ref(bo_va->bo);
list_add(&bo_va->vm_status, &vm->freed);
} else {
radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
spin_unlock(&vm->status_lock);
 
mutex_unlock(&vm->mutex);
}
1139,10 → 1074,10
 
list_for_each_entry(bo_va, &bo->va, bo_list) {
if (bo_va->addr) {
spin_lock(&bo_va->vm->status_lock);
mutex_lock(&bo_va->vm->mutex);
list_del(&bo_va->vm_status);
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
spin_unlock(&bo_va->vm->status_lock);
mutex_unlock(&bo_va->vm->mutex);
}
}
}
1160,17 → 1095,15
const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
RADEON_VM_PTE_COUNT * 8);
unsigned pd_size, pd_entries, pts_size;
int i, r;
int r;
 
vm->id = 0;
vm->ib_bo_va = NULL;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
vm->ids[i].id = 0;
vm->ids[i].flushed_updates = NULL;
vm->ids[i].last_id_use = NULL;
}
vm->fence = NULL;
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
vm->va = RB_ROOT;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);
 
1187,7 → 1120,7
 
r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &vm->page_directory);
&vm->page_directory);
if (r)
return r;
 
1224,13 → 1157,11
if (!r) {
list_del_init(&bo_va->bo_list);
radeon_bo_unreserve(bo_va->bo);
radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
}
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
radeon_bo_unref(&bo_va->bo);
radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
 
1240,10 → 1171,9
 
radeon_bo_unref(&vm->page_directory);
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
radeon_fence_unref(&vm->ids[i].flushed_updates);
radeon_fence_unref(&vm->ids[i].last_id_use);
}
radeon_fence_unref(&vm->fence);
radeon_fence_unref(&vm->last_flush);
radeon_fence_unref(&vm->last_id_use);
 
mutex_destroy(&vm->mutex);
}
/drivers/video/drm/radeon/rdisplay.c
1,6 → 1,8
 
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm.h>
#include <drm_mm.h>
#include "radeon.h"
#include "radeon_object.h"
#include "bitmap.h"
32,7 → 34,7
rdev = (struct radeon_device *)os_display->ddev->dev_private;
 
r = radeon_bo_create(rdev, CURSOR_WIDTH*CURSOR_HEIGHT*4,
4096, false, RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &cursor->robj);
PAGE_SIZE, false, RADEON_GEM_DOMAIN_VRAM, 0, NULL, &cursor->robj);
 
if (unlikely(r != 0))
return r;
227,7 → 229,7
 
cursor_t *cursor;
bool retval = true;
u32 ifl;
u32_t ifl;
 
ENTER();
 
/drivers/video/drm/radeon/rdisplay_kms.c
1,6 → 1,8
 
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm.h>
#include <drm_mm.h>
#include "radeon.h"
#include "radeon_object.h"
#include "drm_fb_helper.h"
405,7 → 407,7
struct drm_framebuffer *fb;
 
cursor_t *cursor;
u32 ifl;
u32_t ifl;
int ret;
 
mutex_lock(&dev->mode_config.mutex);
/drivers/video/drm/radeon/rs600.c
840,9 → 840,6
u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
/* FIXME: implement full support */
 
if (!rdev->mode_info.mode_config_initialized)
return;
 
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled)
/drivers/video/drm/radeon/rs690.c
579,9 → 579,6
u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
 
if (!rdev->mode_info.mode_config_initialized)
return;
 
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled)
/drivers/video/drm/radeon/rs780_dpm.c
24,7 → 24,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "rs780d.h"
#include "r600_dpm.h"
#include "rs780_dpm.h"
/drivers/video/drm/radeon/rv515.c
1214,9 → 1214,6
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
 
if (!rdev->mode_info.mode_config_initialized)
return;
 
radeon_update_display_priority(rdev);
 
if (rdev->mode_info.crtcs[0]->base.enabled)
/drivers/video/drm/radeon/rv6xx_dpm.c
24,7 → 24,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "rv6xxd.h"
#include "r600_dpm.h"
#include "rv6xx_dpm.h"
/drivers/video/drm/radeon/rv770.c
26,6 → 26,7
* Jerome Glisse
*/
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
/drivers/video/drm/radeon/rv770_dma.c
33,19 → 33,18
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
* @resv: reservation object to sync to
* @fence: radeon fence object
*
* Copy GPU paging using the DMA engine (r7xx).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
int rv770_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv)
struct radeon_fence **fence)
{
struct radeon_fence *fence;
struct radeon_sync sync;
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_dw, cur_size_in_dw;
52,7 → 51,11
int i, num_loops;
int r = 0;
 
radeon_sync_create(&sync);
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
59,12 → 62,12
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_sync_resv(rdev, &sync, resv, false);
radeon_sync_rings(rdev, &sync, ring->idx);
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
for (i = 0; i < num_loops; i++) {
cur_size_in_dw = size_in_dw;
80,15 → 83,15
dst_offset += cur_size_in_dw * 4;
}
 
r = radeon_fence_emit(rdev, &fence, ring->idx);
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_sync_free(rdev, &sync, fence);
radeon_semaphore_free(rdev, &sem, *fence);
 
return fence;
return r;
}
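 
/* Chunking arithmetic behind the loop above: one GPU page is 4 KiB, so
 * copying 256 pages moves (256 << 12) / 4 = 262144 dwords, and
 * DIV_ROUND_UP(262144, 0xFFFF) = 5 ring passes of at most 65535 dwords
 * each; hence the ring is locked for num_loops * 5 + 8 dwords. */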
/drivers/video/drm/radeon/rv770_dpm.c
24,7 → 24,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "rv770d.h"
#include "r600_dpm.h"
#include "rv770_dpm.h"
/drivers/video/drm/radeon/si_dma.c
185,17 → 185,20
}
}
 
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
{
if (vm == NULL)
return;
 
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
if (vm_id < 8) {
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
if (vm->id < 8) {
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
} else {
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
}
radeon_ring_write(ring, pd_addr >> 12);
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* flush hdp cache */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
205,7 → 208,7
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
radeon_ring_write(ring, 1 << vm->id);
}
 
/**
215,19 → 218,18
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @num_gpu_pages: number of GPU pages to xfer
* @resv: reservation object to sync to
* @fence: radeon fence object
*
* Copy GPU paging using the DMA engine (SI).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct reservation_object *resv)
struct radeon_fence **fence)
{
struct radeon_fence *fence;
struct radeon_sync sync;
struct radeon_semaphore *sem = NULL;
int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
u32 size_in_bytes, cur_size_in_bytes;
234,7 → 236,11
int i, num_loops;
int r = 0;
 
radeon_sync_create(&sync);
r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
 
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
241,12 → 247,12
r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_sync_resv(rdev, &sync, resv, false);
radeon_sync_rings(rdev, &sync, ring->idx);
radeon_semaphore_sync_to(sem, *fence);
radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
262,16 → 268,16
dst_offset += cur_size_in_bytes;
}
 
r = radeon_fence_emit(rdev, &fence, ring->idx);
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
radeon_sync_free(rdev, &sync, NULL);
return ERR_PTR(r);
radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
 
radeon_ring_unlock_commit(rdev, ring, false);
radeon_sync_free(rdev, &sync, fence);
radeon_semaphore_free(rdev, &sem, *fence);
 
return fence;
return r;
}
 
/drivers/video/drm/radeon/si_dpm.h
182,7 → 182,6
u32 dte_table_start;
u32 spll_table_start;
u32 papm_cfg_table_start;
u32 fan_table_start;
/* CAC stuff */
const struct si_cac_config_reg *cac_weights;
const struct si_cac_config_reg *lcac_config;
198,10 → 197,6
/* SVI2 */
u8 svd_gpio_id;
u8 svc_gpio_id;
/* fan control */
bool fan_ctrl_is_in_default_mode;
u32 t_min;
u32 fan_ctrl_default_mode;
};
 
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
/drivers/video/drm/radeon/si_smc.c
135,7 → 135,7
 
int si_program_jump_on_start(struct radeon_device *rdev)
{
static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
static u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
 
return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
}
/drivers/video/drm/radeon/sid.h
180,10 → 180,7
#define DIG_THERM_DPM(x) ((x) << 14)
#define DIG_THERM_DPM_MASK 0x003FC000
#define DIG_THERM_DPM_SHIFT 14
#define CG_THERMAL_STATUS 0x704
#define FDO_PWM_DUTY(x) ((x) << 9)
#define FDO_PWM_DUTY_MASK (0xff << 9)
#define FDO_PWM_DUTY_SHIFT 9
 
#define CG_THERMAL_INT 0x708
#define DIG_THERM_INTH(x) ((x) << 8)
#define DIG_THERM_INTH_MASK 0x0000FF00
194,10 → 191,6
#define THERM_INT_MASK_HIGH (1 << 24)
#define THERM_INT_MASK_LOW (1 << 25)
 
#define CG_MULT_THERMAL_CTRL 0x710
#define TEMP_SEL(x) ((x) << 20)
#define TEMP_SEL_MASK (0xff << 20)
#define TEMP_SEL_SHIFT 20
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
206,37 → 199,6
#define CTF_TEMP_MASK 0x0003fe00
#define CTF_TEMP_SHIFT 9
 
#define CG_FDO_CTRL0 0x754
#define FDO_STATIC_DUTY(x) ((x) << 0)
#define FDO_STATIC_DUTY_MASK 0x000000FF
#define FDO_STATIC_DUTY_SHIFT 0
#define CG_FDO_CTRL1 0x758
#define FMAX_DUTY100(x) ((x) << 0)
#define FMAX_DUTY100_MASK 0x000000FF
#define FMAX_DUTY100_SHIFT 0
#define CG_FDO_CTRL2 0x75C
#define TMIN(x) ((x) << 0)
#define TMIN_MASK 0x000000FF
#define TMIN_SHIFT 0
#define FDO_PWM_MODE(x) ((x) << 11)
#define FDO_PWM_MODE_MASK (7 << 11)
#define FDO_PWM_MODE_SHIFT 11
#define TACH_PWM_RESP_RATE(x) ((x) << 25)
#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
#define TACH_PWM_RESP_RATE_SHIFT 25
 
#define CG_TACH_CTRL 0x770
# define EDGE_PER_REV(x) ((x) << 0)
# define EDGE_PER_REV_MASK (0x7 << 0)
# define EDGE_PER_REV_SHIFT 0
# define TARGET_PERIOD(x) ((x) << 3)
# define TARGET_PERIOD_MASK 0xfffffff8
# define TARGET_PERIOD_SHIFT 3
#define CG_TACH_STATUS 0x774
# define TACH_PERIOD(x) ((x) << 0)
# define TACH_PERIOD_MASK 0xffffffff
# define TACH_PERIOD_SHIFT 0
 
#define GENERAL_PWRMGT 0x780
# define GLOBAL_PWRMGT_EN (1 << 0)
# define STATIC_PM_EN (1 << 1)
774,7 → 736,7
# define DESCRIPTION16(x) (((x) & 0xff) << 0)
# define DESCRIPTION17(x) (((x) & 0xff) << 8)
 
#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
# define AUDIO_ENABLED (1 << 31)
 
#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
/drivers/video/drm/radeon/sislands_smc.h
245,31 → 245,6
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
 
struct PP_SIslands_FanTable
{
uint8_t fdo_mode;
uint8_t padding;
int16_t temp_min;
int16_t temp_med;
int16_t temp_max;
int16_t slope1;
int16_t slope2;
int16_t fdo_min;
int16_t hys_up;
int16_t hys_down;
int16_t hys_slope;
int16_t temp_resp_lim;
int16_t temp_curr;
int16_t slope_curr;
int16_t pwm_curr;
uint32_t refresh_period;
int16_t fdo_max;
uint8_t temp_src;
int8_t padding2;
};
 
typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
 
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
 
/drivers/video/drm/radeon/smu7_discrete.h
431,31 → 431,6
 
typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
 
struct SMU7_Discrete_FanTable
{
uint16_t FdoMode;
int16_t TempMin;
int16_t TempMed;
int16_t TempMax;
int16_t Slope1;
int16_t Slope2;
int16_t FdoMin;
int16_t HystUp;
int16_t HystDown;
int16_t HystSlope;
int16_t TempRespLim;
int16_t TempCurr;
int16_t SlopeCurr;
int16_t PwmCurr;
uint32_t RefreshPeriod;
int16_t FdoMax;
uint8_t TempSrc;
int8_t Padding;
};
 
typedef struct SMU7_Discrete_FanTable SMU7_Discrete_FanTable;
 
 
struct SMU7_Discrete_PmFuses {
// dw0-dw1
uint8_t BapmVddCVidHiSidd[8];
487,10 → 462,7
uint8_t BapmVddCVidHiSidd2[8];
 
// dw11-dw12
int16_t FuzzyFan_ErrorSetDelta;
int16_t FuzzyFan_ErrorRateSetDelta;
int16_t FuzzyFan_PwmSetDelta;
uint16_t CalcMeasPowerBlend;
uint32_t Reserved6[2];
 
// dw13-dw16
uint8_t GnbLPML[16];
/drivers/video/drm/radeon/sumo_dpm.c
23,7 → 23,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "sumod.h"
#include "r600_dpm.h"
#include "cypress_dpm.h"
/drivers/video/drm/radeon/trinity_dpm.c
23,7 → 23,6
 
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "trinityd.h"
#include "r600_dpm.h"
#include "trinity_dpm.h"
/drivers/video/drm/radeon/utils.c
1,8 → 1,8
#include <ddk.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <drm/drmP.h>
#include <linux/hdmi.h>
#include "radeon.h"
 
int x86_clflush_size;
unsigned int tsc_khz;
12,7 → 12,7
struct file *filep;
int count;
 
filep = __builtin_malloc(sizeof(*filep));
filep = malloc(sizeof(*filep));
 
if(unlikely(filep == NULL))
return ERR_PTR(-ENOMEM);
159,6 → 159,7
}
 
 
 
//const char hex_asc[] = "0123456789abcdef";
 
/**
377,93 → 378,45
buf, len, true);
}
 
void msleep(unsigned int msecs)
{
msecs /= 10;
if(!msecs) msecs = 1;
 
__asm__ __volatile__ (
"call *__imp__Delay"
::"b" (msecs));
__asm__ __volatile__ (
"":::"ebx");
 
};
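 
/* Tick rounding implied above: Delay() counts 10 ms scheduler ticks,
 * so msleep(25) issues Delay(2) (~20 ms), and anything under 10 ms is
 * rounded up to a single tick, e.g. msleep(3) -> Delay(1). */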
 
 
/* simple loop based delay: */
static void delay_loop(unsigned long loops)
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
asm volatile(
" test %0,%0 \n"
" jz 3f \n"
" jmp 1f \n"
 
".align 16 \n"
"1: jmp 2f \n"
 
".align 16 \n"
"2: dec %0 \n"
" jnz 2b \n"
"3: dec %0 \n"
 
: /* we don't need output */
:"a" (loops)
);
/* ecx is often an input as well as an output. */
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (*eax), "2" (*ecx)
: "memory");
}
 
 
static void (*delay_fn)(unsigned long) = delay_loop;
 
void __delay(unsigned long loops)
static inline void cpuid(unsigned int op,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
delay_fn(loops);
*eax = op;
*ecx = 0;
__cpuid(eax, ebx, ecx, edx);
}
 
 
inline void __const_udelay(unsigned long xloops)
void cpu_detect()
{
int d0;
u32 junk, tfms, cap0, misc;
 
xloops *= 4;
asm("mull %%edx"
: "=d" (xloops), "=&a" (d0)
: "1" (xloops), ""
(loops_per_jiffy * (HZ/4)));
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 
__delay(++xloops);
}
 
void __udelay(unsigned long usecs)
if (cap0 & (1<<19))
{
__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
x86_clflush_size = ((misc >> 8) & 0xff) * 8;
}
 
unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
return (w * 0x01010101) >> 24;
#else
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
#endif
tsc_khz = GetCpuFreq()/1000;
}
EXPORT_SYMBOL(_sw_hweight32);
 
 
void usleep_range(unsigned long min, unsigned long max)
{
udelay(max);
}
EXPORT_SYMBOL(usleep_range);
 
 
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
void *p;
474,504 → 427,26
return p;
}
 
void cpu_detect1()
{
 
u32 junk, tfms, cap0, misc;
int i;
 
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 
if (cap0 & (1<<19))
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
x86_clflush_size = ((misc >> 8) & 0xff) * 8;
}
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
 
#if 0
cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
(unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
(unsigned int*)&cpuinfo.model_name[24], (unsigned int*)&cpuinfo.model_name[28]);
cpuid(0x80000004, (unsigned int*)&cpuinfo.model_name[32], (unsigned int*)&cpuinfo.model_name[36],
(unsigned int*)&cpuinfo.model_name[40], (unsigned int*)&cpuinfo.model_name[44]);
 
printf("\n%s\n\n",cpuinfo.model_name);
 
cpuinfo.def_mtrr = read_msr(MSR_MTRRdefType);
cpuinfo.mtrr_cap = read_msr(IA32_MTRRCAP);
 
printf("MSR_MTRRdefType %016llx\n\n", cpuinfo.def_mtrr);
 
cpuinfo.var_mtrr_count = (u8_t)cpuinfo.mtrr_cap;
 
for(i = 0; i < cpuinfo.var_mtrr_count; i++)
{
u64_t mtrr_base;
u64_t mtrr_mask;
 
cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));
 
printf("MTRR_%d base: %016llx mask: %016llx\n", i,
cpuinfo.var_mtrr[i].base,
cpuinfo.var_mtrr[i].mask);
};
 
unsigned int cr0, cr3, cr4, eflags;
 
eflags = safe_cli();
 
/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
cr0 = read_cr0() | (1<<30);
write_cr0(cr0);
wbinvd();
 
cr4 = read_cr4();
write_cr4(cr4 & ~(1<<7));
 
cr3 = read_cr3();
write_cr3(cr3);
 
/* Save MTRR state */
rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 
/* Disable MTRRs, and set the default type to uncached */
native_write_msr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
wbinvd();
 
i = 0;
set_mtrr(i++,0,0x80000000>>12,MTRR_WB);
set_mtrr(i++,0x80000000>>12,0x40000000>>12,MTRR_WB);
set_mtrr(i++,0xC0000000>>12,0x20000000>>12,MTRR_WB);
set_mtrr(i++,0xdb800000>>12,0x00800000>>12,MTRR_UC);
set_mtrr(i++,0xdc000000>>12,0x04000000>>12,MTRR_UC);
set_mtrr(i++,0xE0000000>>12,0x10000000>>12,MTRR_WC);
 
for(; i < cpuinfo.var_mtrr_count; i++)
set_mtrr(i,0,0,0);
 
write_cr3(cr3);
 
/* Intel (P6) standard MTRRs */
native_write_msr(MSR_MTRRdefType, deftype_lo, deftype_hi);
 
/* Enable caches */
write_cr0(read_cr0() & ~(1<<30));
 
/* Restore value of CR4 */
write_cr4(cr4);
 
safe_sti(eflags);
 
printf("\nnew MTRR map\n\n");
 
for(i = 0; i < cpuinfo.var_mtrr_count; i++)
{
u64_t mtrr_base;
u64_t mtrr_mask;
 
cpuinfo.var_mtrr[i].base = read_msr(MTRRphysBase_MSR(i));
cpuinfo.var_mtrr[i].mask = read_msr(MTRRphysMask_MSR(i));
 
printf("MTRR_%d base: %016llx mask: %016llx\n", i,
cpuinfo.var_mtrr[i].base,
cpuinfo.var_mtrr[i].mask);
};
#endif
 
tsc_khz = (unsigned int)(GetCpuFreq()/1000);
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
 
 
static atomic_t fence_context_counter = ATOMIC_INIT(0);
 
/**
* fence_context_alloc - allocate an array of fence contexts
* @num: [in] amount of contexts to allocate
*
* This function returns the first index of the @num contexts allocated.
* The fence context is used for setting fence->context to a unique number.
*/
unsigned fence_context_alloc(unsigned num)
{
BUG_ON(!num);
return atomic_add_return(num, &fence_context_counter) - num;
tmp = (*p) | (~0UL << size);
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found:
return result + ffz(tmp);
}
EXPORT_SYMBOL(fence_context_alloc);
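 
/* Example of the counter semantics: with fence_context_counter at 8,
 * fence_context_alloc(4) atomically advances it to 12 and returns 8,
 * so the caller owns contexts 8, 9, 10 and 11. */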
 
 
int fence_signal(struct fence *fence)
{
unsigned long flags;
 
if (!fence)
return -EINVAL;
 
// if (!ktime_to_ns(fence->timestamp)) {
// fence->timestamp = ktime_get();
// smp_mb__before_atomic();
// }
 
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return -EINVAL;
 
// trace_fence_signaled(fence);
 
if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
struct fence_cb *cur, *tmp;
 
spin_lock_irqsave(fence->lock, flags);
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
cur->func(fence, cur);
}
spin_unlock_irqrestore(fence->lock, flags);
}
return 0;
}
EXPORT_SYMBOL(fence_signal);
 
int fence_signal_locked(struct fence *fence)
{
struct fence_cb *cur, *tmp;
int ret = 0;
 
if (WARN_ON(!fence))
return -EINVAL;
 
// if (!ktime_to_ns(fence->timestamp)) {
// fence->timestamp = ktime_get();
// smp_mb__before_atomic();
// }
 
if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
ret = -EINVAL;
 
/*
* we might have raced with the unlocked fence_signal,
* still run through all callbacks
*/
}// else
// trace_fence_signaled(fence);
 
list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
list_del_init(&cur->node);
cur->func(fence, cur);
}
return ret;
}
EXPORT_SYMBOL(fence_signal_locked);
 
 
void fence_enable_sw_signaling(struct fence *fence)
{
unsigned long flags;
 
if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
// trace_fence_enable_signal(fence);
 
spin_lock_irqsave(fence->lock, flags);
 
if (!fence->ops->enable_signaling(fence))
fence_signal_locked(fence);
 
spin_unlock_irqrestore(fence->lock, flags);
}
}
EXPORT_SYMBOL(fence_enable_sw_signaling);
 
 
 
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
signed long ret;
 
if (WARN_ON(timeout < 0))
return -EINVAL;
 
// trace_fence_wait_start(fence);
ret = fence->ops->wait(fence, intr, timeout);
// trace_fence_wait_end(fence);
return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);
 
void fence_release(struct kref *kref)
{
struct fence *fence =
container_of(kref, struct fence, refcount);
 
// trace_fence_destroy(fence);
 
BUG_ON(!list_empty(&fence->cb_list));
 
if (fence->ops->release)
fence->ops->release(fence);
else
fence_free(fence);
}
EXPORT_SYMBOL(fence_release);
 
void fence_free(struct fence *fence)
{
kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);
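 
/* Lifetime sketch (upstream pairing, shown here for context): holders
 * take references on fence->refcount and drop them via kref_put(); the
 * final put runs fence_release(), which calls ops->release() or falls
 * back to fence_free(). "example_fence_put" is a hypothetical stand-in
 * for fence_put(): */
static inline void example_fence_put(struct fence *fence)
{
	kref_put(&fence->refcount, fence_release);
}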
 
 
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
struct reservation_object_list *fobj,
struct fence *fence)
{
u32 i;
 
fence_get(fence);
 
// preempt_disable();
write_seqcount_begin(&obj->seq);
 
for (i = 0; i < fobj->shared_count; ++i) {
struct fence *old_fence;
 
old_fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(obj));
 
if (old_fence->context == fence->context) {
/* memory barrier is added by write_seqcount_begin */
RCU_INIT_POINTER(fobj->shared[i], fence);
write_seqcount_end(&obj->seq);
// preempt_enable();
 
fence_put(old_fence);
return;
}
}
 
/*
* memory barrier is added by write_seqcount_begin,
* fobj->shared_count is protected by this lock too
*/
RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
fobj->shared_count++;
 
write_seqcount_end(&obj->seq);
// preempt_enable();
}
 
 
 
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
struct reservation_object_list *old,
struct reservation_object_list *fobj,
struct fence *fence)
{
unsigned i;
struct fence *old_fence = NULL;
 
fence_get(fence);
 
if (!old) {
RCU_INIT_POINTER(fobj->shared[0], fence);
fobj->shared_count = 1;
goto done;
}
 
/*
* no need to bump fence refcounts, rcu_read access
* requires the use of kref_get_unless_zero, and the
* references from the old struct are carried over to
* the new.
*/
fobj->shared_count = old->shared_count;
 
for (i = 0; i < old->shared_count; ++i) {
struct fence *check;
 
check = rcu_dereference_protected(old->shared[i],
reservation_object_held(obj));
 
if (!old_fence && check->context == fence->context) {
old_fence = check;
RCU_INIT_POINTER(fobj->shared[i], fence);
} else
RCU_INIT_POINTER(fobj->shared[i], check);
}
if (!old_fence) {
RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
fobj->shared_count++;
}
 
done:
// preempt_disable();
write_seqcount_begin(&obj->seq);
/*
* RCU_INIT_POINTER can be used here,
* seqcount provides the necessary barriers
*/
RCU_INIT_POINTER(obj->fence, fobj);
write_seqcount_end(&obj->seq);
// preempt_enable();
 
if (old)
kfree_rcu(old, rcu);
 
if (old_fence)
fence_put(old_fence);
}
 
 
int reservation_object_reserve_shared(struct reservation_object *obj)
{
struct reservation_object_list *fobj, *old;
u32 max;
 
old = reservation_object_get_list(obj);
 
if (old && old->shared_max) {
if (old->shared_count < old->shared_max) {
/* perform an in-place update */
kfree(obj->staged);
obj->staged = NULL;
return 0;
} else
max = old->shared_max * 2;
} else
max = 4;
 
/*
* resize obj->staged or allocate if it doesn't exist,
* noop if already correct size
*/
fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
GFP_KERNEL);
if (!fobj)
return -ENOMEM;
 
obj->staged = fobj;
fobj->shared_max = max;
return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);
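 
/* Growth sketch: shared_max starts at 4 and doubles once shared_count
 * catches up, so the staged list steps 4 -> 8 -> 16 slots; each step is
 * krealloc()'d to offsetof(typeof(*fobj), shared[max]) bytes so the
 * flexible shared[] array is sized exactly. */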
 
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct fence *fence)
{
struct reservation_object_list *old, *fobj = obj->staged;
 
old = reservation_object_get_list(obj);
obj->staged = NULL;
 
if (!fobj) {
BUG_ON(old->shared_count >= old->shared_max);
reservation_object_add_shared_inplace(obj, old, fence);
} else
reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
 
 
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct fence *fence)
{
struct fence *old_fence = reservation_object_get_excl(obj);
struct reservation_object_list *old;
u32 i = 0;
 
old = reservation_object_get_list(obj);
if (old)
i = old->shared_count;
 
if (fence)
fence_get(fence);
 
// preempt_disable();
write_seqcount_begin(&obj->seq);
/* write_seqcount_begin provides the necessary memory barrier */
RCU_INIT_POINTER(obj->fence_excl, fence);
if (old)
old->shared_count = 0;
write_seqcount_end(&obj->seq);
// preempt_enable();
 
/* inplace update, no shared fences */
while (i--)
fence_put(rcu_dereference_protected(old->shared[i],
reservation_object_held(obj)));
 
if (old_fence)
fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
void
fence_init(struct fence *fence, const struct fence_ops *ops,
spinlock_t *lock, unsigned context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
!ops->get_driver_name || !ops->get_timeline_name);
 
kref_init(&fence->refcount);
fence->ops = ops;
INIT_LIST_HEAD(&fence->cb_list);
fence->lock = lock;
fence->context = context;
fence->seqno = seqno;
fence->flags = 0UL;
 
// trace_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);
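 
/* Usage sketch for a hypothetical driver fence; "example_*" names are
 * illustrative, and the ops table must supply wait, enable_signaling
 * and both name hooks or the BUG_ON() above fires: */
struct example_fence {
	struct fence base;
};

static DEFINE_SPINLOCK(example_lock);
static unsigned example_ctx;	/* one context from fence_context_alloc(1) */
static unsigned example_seqno;

static void example_fence_start(struct example_fence *f,
				const struct fence_ops *ops)
{
	fence_init(&f->base, ops, &example_lock, example_ctx,
		   ++example_seqno);
}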
 
 
#include <linux/rcupdate.h>
 
struct rcu_ctrlblk {
struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
struct rcu_head **curtail; /* ->next pointer of last CB. */
// RCU_TRACE(long qlen); /* Number of pending CBs. */
// RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
// RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
// RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
// RCU_TRACE(const char *name); /* Name of RCU type. */
};
 
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
.donetail = &rcu_sched_ctrlblk.rcucblist,
.curtail = &rcu_sched_ctrlblk.rcucblist,
// RCU_TRACE(.name = "rcu_sched")
};
 
static void __call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu),
struct rcu_ctrlblk *rcp)
{
unsigned long flags;
 
// debug_rcu_head_queue(head);
head->func = func;
head->next = NULL;
 
local_irq_save(flags);
*rcp->curtail = head;
rcp->curtail = &head->next;
// RCU_TRACE(rcp->qlen++);
local_irq_restore(flags);
}
 
/*
* Post an RCU callback to be invoked after the end of an RCU-sched grace
* period. But since we have but one CPU, that would be after any
* quiescent state.
*/
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
__call_rcu(head, func, &rcu_sched_ctrlblk);
}
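 
/* Deferred-free sketch on this single-CPU port ("example_obj" is
 * hypothetical): queue the object's rcu_head and let the callback
 * reclaim it after the (here trivial) grace period. */
struct example_obj {
	struct rcu_head rcu;
};

static void example_obj_free(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}
/* ...and at the call site: call_rcu_sched(&obj->rcu, example_obj_free); */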
 
 
/drivers/video/drm/radeon/uvd_v1_0.c
70,82 → 70,6
}
 
/**
* uvd_v1_0_fence_emit - emit a fence & trap command
*
* @rdev: radeon_device pointer
* @fence: fence to emit
*
* Write a fence and a trap command to the ring.
*/
void uvd_v1_0_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
 
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 0);
 
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 2);
return;
}
 
/**
* uvd_v1_0_resume - memory controller programming
*
* @rdev: radeon_device pointer
*
* Let the UVD memory controller know its offsets
*/
int uvd_v1_0_resume(struct radeon_device *rdev)
{
uint64_t addr;
uint32_t size;
int r;
 
r = radeon_uvd_resume(rdev);
if (r)
return r;
 
/* program the VCPU memory controller bits 0-27 */
addr = (rdev->uvd.gpu_addr >> 3) + 16;
size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
WREG32(UVD_VCPU_CACHE_SIZE0, size);
 
addr += size;
size = RADEON_UVD_STACK_SIZE >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
WREG32(UVD_VCPU_CACHE_SIZE1, size);
 
addr += size;
size = RADEON_UVD_HEAP_SIZE >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
WREG32(UVD_VCPU_CACHE_SIZE2, size);
 
/* bits 28-31 */
addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
 
/* bits 32-39 */
addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
 
WREG32(UVD_FW_START, *((uint32_t*)rdev->uvd.cpu_addr));
 
return 0;
}
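 
/* Resulting VCPU cache layout, in 8-byte units (hence the ">> 3"):
 * starting 16 units past the BO base come the page-aligned firmware
 * image, then RADEON_UVD_STACK_SIZE, then RADEON_UVD_HEAP_SIZE, with
 * address bits 28-39 programmed separately via the EXT registers. */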
 
/**
* uvd_v1_0_init - start and test UVD block
*
* @rdev: radeon_device pointer
206,32 → 130,8
/* lower clocks again */
radeon_set_uvd_clocks(rdev, 0, 0);
 
if (!r) {
switch (rdev->family) {
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV620:
/* 64byte granularity workaround */
WREG32(MC_CONFIG, 0);
WREG32(MC_CONFIG, 1 << 4);
WREG32(RS_DQ_RD_RET_CONF, 0x3f);
WREG32(MC_CONFIG, 0x1f);
 
/* fall through */
case CHIP_RV670:
case CHIP_RV635:
 
/* write clean workaround */
WREG32_P(UVD_VCPU_CNTL, 0x10, ~0x10);
break;
 
default:
/* TODO: Do we need more? */
break;
}
 
if (!r)
DRM_INFO("UVD initialized successfully.\n");
}
 
return r;
}
318,12 → 218,12
/* enable UMC */
WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
 
WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
 
/* boot up the VCPU */
WREG32(UVD_SOFT_RESET, 0);
mdelay(10);
 
WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
 
for (i = 0; i < 10; ++i) {
uint32_t status;
for (j = 0; j < 100; ++j) {
/drivers/video/drm/radeon/uvd_v2_2.c
72,10 → 72,6
uint32_t chip_id, size;
int r;
 
/* RV770 uses V1.0 MC */
if (rdev->family == CHIP_RV770)
return uvd_v1_0_resume(rdev);
 
r = radeon_uvd_resume(rdev);
if (r)
return r;
/drivers/video/drm/radeon/atom.h
125,7 → 125,6
struct atom_context {
struct card_info *card;
struct mutex mutex;
struct mutex scratch_mutex;
void *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;
146,7 → 145,6
 
struct atom_context *atom_parse(struct card_info *, void *);
int atom_execute_table(struct atom_context *, int, uint32_t *);
int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
/drivers/video/drm/ttm/ttm_bo.c
37,11 → 37,11
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/reservation.h>
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
48,13 → 48,12
 
 
 
static inline int ttm_mem_type_from_place(const struct ttm_place *place,
uint32_t *mem_type)
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
int i;
 
for (i = 0; i <= TTM_PL_PRIV5; i++)
if (place->flags & (1 << i)) {
if (flags & (1 << i)) {
*mem_type = i;
return 0;
}
84,6 → 83,7
BUG_ON(atomic_read(&bo->list_kref.refcount));
BUG_ON(atomic_read(&bo->kref.refcount));
BUG_ON(atomic_read(&bo->cpu_writers));
BUG_ON(bo->sync_obj != NULL);
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
343,30 → 343,12
ww_mutex_unlock (&bo->resv->lock);
}
 
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
struct reservation_object_list *fobj;
struct fence *fence;
int i;
 
fobj = reservation_object_get_list(bo->resv);
fence = reservation_object_get_excl(bo->resv);
if (fence && !fence->ops->signaled)
fence_enable_sw_signaling(fence);
 
for (i = 0; fobj && i < fobj->shared_count; ++i) {
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(bo->resv));
 
if (!fence->ops->signaled)
fence_enable_sw_signaling(fence);
}
}
 
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
struct ttm_bo_driver *driver = bdev->driver;
void *sync_obj = NULL;
int put_count;
int ret;
 
373,8 → 355,10
spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
if (!ret) {
if (!ttm_bo_wait(bo, false, false, true)) {
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
if (!ret && !bo->sync_obj) {
spin_unlock(&bdev->fence_lock);
put_count = ttm_bo_del_from_lru(bo);
 
spin_unlock(&glob->lru_lock);
383,9 → 367,13
ttm_bo_list_ref_sub(bo, put_count, true);
 
return;
} else
ttm_bo_flush_all_fences(bo);
}
if (bo->sync_obj)
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
 
if (!ret) {
 
/*
* Make NO_EVICT bos immediately available to
* shrinkers, now that they are queued for
403,6 → 391,10
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
 
if (sync_obj) {
driver->sync_obj_flush(sync_obj);
driver->sync_obj_unref(&sync_obj);
}
// schedule_delayed_work(&bdev->wq,
// ((HZ / 100) < 1) ? 1 : HZ / 100);
}
423,26 → 415,44
bool interruptible,
bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
struct ttm_bo_global *glob = bo->glob;
int put_count;
int ret;
 
spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, true);
 
if (ret && !no_wait_gpu) {
long lret;
ww_mutex_unlock(&bo->resv->lock);
void *sync_obj;
 
/*
* Take a reference to the fence and unreserve,
* at this point the buffer should be dead, so
* no new sync objects can be attached.
*/
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
 
__ttm_bo_unreserve(bo);
spin_unlock(&glob->lru_lock);
 
lret = reservation_object_wait_timeout_rcu(bo->resv,
true,
interruptible,
30 * HZ);
ret = driver->sync_obj_wait(sync_obj, false, interruptible);
driver->sync_obj_unref(&sync_obj);
if (ret)
return ret;
 
if (lret < 0)
return lret;
else if (lret == 0)
return -EBUSY;
/*
* remove sync_obj with ttm_bo_wait, the wait should be
* finished, and no new wait object should have been added.
*/
spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, true);
WARN_ON(ret);
spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
 
spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, false, NULL);
459,15 → 469,9
spin_unlock(&glob->lru_lock);
return 0;
}
} else
spin_unlock(&bdev->fence_lock);
 
/*
* remove sync_obj with ttm_bo_wait, the wait should be
* finished, and no new wait object should have been added.
*/
ret = ttm_bo_wait(bo, false, false, true);
WARN_ON(ret);
}
 
if (ret || unlikely(list_empty(&bo->ddestroy))) {
__ttm_bo_unreserve(bo);
spin_unlock(&glob->lru_lock);
597,7 → 601,7
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
const struct ttm_place *place,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
bool no_wait_gpu)
607,7 → 611,7
int ret;
 
do {
ret = (*man->func->get_node)(man, bo, place, mem);
ret = (*man->func->get_node)(man, bo, placement, 0, mem);
if (unlikely(ret != 0))
return ret;
if (mem->mm_node)
650,18 → 654,18
 
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
uint32_t mem_type,
const struct ttm_place *place,
uint32_t proposed_placement,
uint32_t *masked_placement)
{
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
return false;
 
if ((place->flags & man->available_caching) == 0)
if ((proposed_placement & man->available_caching) == 0)
return false;
 
cur_flags |= (place->flags & man->available_caching);
cur_flags |= (proposed_placement & man->available_caching);
 
*masked_placement = cur_flags;
return true;
692,14 → 696,15
 
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
const struct ttm_place *place = &placement->placement[i];
 
ret = ttm_mem_type_from_place(place, &mem_type);
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type];
 
type_ok = ttm_bo_mt_compatible(man, mem_type, place,
type_ok = ttm_bo_mt_compatible(man,
mem_type,
placement->placement[i],
&cur_flags);
 
if (!type_ok)
711,7 → 716,7
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, place->flags,
ttm_flag_masked(&cur_flags, placement->placement[i],
~TTM_PL_MASK_MEMTYPE);
 
if (mem_type == TTM_PL_SYSTEM)
719,7 → 724,8
 
if (man->has_type && man->use_type) {
type_found = true;
ret = (*man->func->get_node)(man, bo, place, mem);
ret = (*man->func->get_node)(man, bo, placement,
cur_flags, mem);
if (unlikely(ret))
return ret;
}
737,15 → 743,17
return -EINVAL;
 
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
 
ret = ttm_mem_type_from_place(place, &mem_type);
ret = ttm_mem_type_from_flags(placement->busy_placement[i],
&mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type];
if (!man->has_type)
continue;
if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
if (!ttm_bo_mt_compatible(man,
mem_type,
placement->busy_placement[i],
&cur_flags))
continue;
 
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
754,7 → 762,7
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
ttm_flag_masked(&cur_flags, place->flags,
ttm_flag_masked(&cur_flags, placement->busy_placement[i],
~TTM_PL_MASK_MEMTYPE);
 
if (mem_type == TTM_PL_SYSTEM) {
764,7 → 772,7
return 0;
}
 
ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
interruptible, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
785,6 → 793,7
{
int ret = 0;
struct ttm_mem_reg mem;
struct ttm_bo_device *bdev = bo->bdev;
 
lockdep_assert_held(&bo->resv->lock.base);
 
793,7 → 802,9
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
822,14 → 833,13
{
int i;
 
if (mem->mm_node && placement->lpfn != 0 &&
(mem->start < placement->fpfn ||
mem->start + mem->num_pages > placement->lpfn))
return false;
 
for (i = 0; i < placement->num_placement; i++) {
const struct ttm_place *heap = &placement->placement[i];
if (mem->mm_node &&
(mem->start < heap->fpfn ||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
 
*new_flags = heap->flags;
*new_flags = placement->placement[i];
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
836,13 → 846,7
}
 
for (i = 0; i < placement->num_busy_placement; i++) {
const struct ttm_place *heap = &placement->busy_placement[i];
if (mem->mm_node &&
(mem->start < heap->fpfn ||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
 
*new_flags = heap->flags;
*new_flags = placement->busy_placement[i];
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
860,6 → 864,11
uint32_t new_flags;
 
lockdep_assert_held(&bo->resv->lock.base);
/* Check that range is valid */
if (placement->lpfn || placement->fpfn)
if (placement->fpfn > placement->lpfn ||
(placement->lpfn - placement->fpfn) < bo->num_pages)
return -EINVAL;
/*
* Check whether we need to move buffer.
*/
888,6 → 897,15
}
EXPORT_SYMBOL(ttm_bo_validate);
 
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
BUG_ON((placement->fpfn || placement->lpfn) &&
(bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
 
return 0;
}
 
int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
unsigned long size,
898,7 → 916,6
struct file *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
struct reservation_object *resv,
void (*destroy) (struct ttm_buffer_object *))
{
int ret = 0;
940,37 → 957,29
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
bo->sg = sg;
if (resv) {
bo->resv = resv;
lockdep_assert_held(&bo->resv->lock.base);
} else {
bo->resv = &bo->ttm_resv;
reservation_object_init(&bo->ttm_resv);
}
reservation_object_init(bo->resv);
atomic_inc(&bo->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
 
ret = ttm_bo_check_placement(bo, placement);
 
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
if (bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg)
if (likely(!ret) &&
(bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg))
ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
bo->mem.num_pages);
 
/* passed reservation objects should already be locked,
* since otherwise lockdep will be angered in radeon.
*/
if (!resv) {
locked = ww_mutex_trylock(&bo->resv->lock);
WARN_ON(!locked);
}
 
if (likely(!ret))
ret = ttm_bo_validate(bo, placement, interruptible, false);
 
if (!resv)
ttm_bo_unreserve(bo);
 
if (unlikely(ret))
1109,6 → 1118,7
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
bdev->val_seq = 0;
spin_lock_init(&bdev->fence_lock);
mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
1161,52 → 1171,59
 
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
{
struct reservation_object_list *fobj;
struct reservation_object *resv;
struct fence *excl;
long timeout = 15 * HZ;
int i;
struct ttm_bo_driver *driver = bo->bdev->driver;
struct ttm_bo_device *bdev = bo->bdev;
void *sync_obj;
int ret = 0;
 
resv = bo->resv;
fobj = reservation_object_get_list(resv);
excl = reservation_object_get_excl(resv);
if (excl) {
if (!fence_is_signaled(excl)) {
if (no_wait)
return -EBUSY;
if (likely(bo->sync_obj == NULL))
return 0;
 
timeout = fence_wait_timeout(excl,
interruptible, timeout);
while (bo->sync_obj) {
 
if (driver->sync_obj_signaled(bo->sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&tmp_obj);
spin_lock(&bdev->fence_lock);
continue;
}
}
 
for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
struct fence *fence;
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(resv));
 
if (!fence_is_signaled(fence)) {
if (no_wait)
return -EBUSY;
 
timeout = fence_wait_timeout(fence,
interruptible, timeout);
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
ret = driver->sync_obj_wait(sync_obj,
lazy, interruptible);
if (unlikely(ret != 0)) {
driver->sync_obj_unref(&sync_obj);
spin_lock(&bdev->fence_lock);
return ret;
}
spin_lock(&bdev->fence_lock);
if (likely(bo->sync_obj == sync_obj)) {
void *tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
clear_bit(TTM_BO_PRIV_FLAG_MOVING,
&bo->priv_flags);
spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
driver->sync_obj_unref(&tmp_obj);
spin_lock(&bdev->fence_lock);
} else {
spin_unlock(&bdev->fence_lock);
driver->sync_obj_unref(&sync_obj);
spin_lock(&bdev->fence_lock);
}
 
if (timeout < 0)
return timeout;
 
if (timeout == 0)
return -EBUSY;
 
reservation_object_add_excl_fence(resv, NULL);
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
}
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
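/*
 * Editor's note: an illustrative sketch, not code from this tree, of
 * how a caller might idle a buffer with ttm_bo_wait() before touching
 * it from the CPU. The fence-based version relies on the reservation
 * lock being held; the older sync_obj version instead expected
 * bdev->fence_lock to be held around the call.
 */
static int example_wait_for_idle(struct ttm_buffer_object *bo,
				 bool interruptible)
{
	/* lazy = false, no_wait = false: block until idle or error */
	return ttm_bo_wait(bo, false, interruptible, false);
}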
 
 
/drivers/video/drm/ttm/ttm_bo_manager.c
49,18 → 49,18
 
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_placement *placement,
uint32_t flags,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
struct drm_mm_node *node = NULL;
enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
unsigned long lpfn;
int ret;
 
lpfn = place->lpfn;
lpfn = placement->lpfn;
if (!lpfn)
lpfn = man->size;
 
68,16 → 68,15
if (!node)
return -ENOMEM;
 
if (place->flags & TTM_PL_FLAG_TOPDOWN) {
sflags = DRM_MM_SEARCH_BELOW;
if (flags & TTM_PL_FLAG_TOPDOWN)
aflags = DRM_MM_CREATE_TOP;
}
 
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
mem->page_alignment, 0,
place->fpfn, lpfn,
sflags, aflags);
placement->fpfn, lpfn,
DRM_MM_SEARCH_BEST,
aflags);
spin_unlock(&rman->lock);
 
if (unlikely(ret)) {
/drivers/video/drm/ttm/ttm_bo_util.c
41,6 → 41,7
#include <linux/module.h>
 
#define __pgprot(x) ((pgprot_t) { (x) } )
#define PAGE_KERNEL __pgprot(3)
 
void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
428,6 → 429,8
struct ttm_buffer_object **new_obj)
{
struct ttm_buffer_object *fbo;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
int ret;
 
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
448,6 → 451,12
drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
 
spin_lock(&bdev->fence_lock);
if (bo->sync_obj)
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
else
fbo->sync_obj = NULL;
spin_unlock(&bdev->fence_lock);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
595,20 → 604,30
EXPORT_SYMBOL(ttm_bo_kunmap);
 
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct fence *fence,
void *sync_obj,
bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
struct ttm_buffer_object *ghost_obj;
void *tmp_obj = NULL;
 
reservation_object_add_excl_fence(bo->resv, fence);
spin_lock(&bdev->fence_lock);
if (bo->sync_obj) {
tmp_obj = bo->sync_obj;
bo->sync_obj = NULL;
}
bo->sync_obj = driver->sync_obj_ref(sync_obj);
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
if (ret)
return ret;
 
629,13 → 648,14
*/
 
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
spin_unlock(&bdev->fence_lock);
if (tmp_obj)
driver->sync_obj_unref(&tmp_obj);
 
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
 
reservation_object_add_excl_fence(ghost_obj->resv, fence);
 
/**
* If we're not moving to fixed memory, the TTM object
* needs to stay alive. Otherwise hang it on the ghost
/drivers/video/drm/ttm/ttm_execbuf_util.c
34,12 → 34,20
 
DEFINE_WW_CLASS(reservation_ww_class);
 
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
struct ttm_validate_buffer *entry)
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
list_for_each_entry_continue_reverse(entry, list, head) {
struct ttm_validate_buffer *entry;
 
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
if (!entry->reserved)
continue;
 
entry->reserved = false;
if (entry->removed) {
ttm_bo_add_to_lru(bo);
entry->removed = false;
}
__ttm_bo_unreserve(bo);
}
}
50,12 → 58,30
 
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
unsigned put_count = ttm_bo_del_from_lru(bo);
if (!entry->reserved)
continue;
 
ttm_bo_list_ref_sub(bo, put_count, true);
if (!entry->removed) {
entry->put_count = ttm_bo_del_from_lru(bo);
entry->removed = true;
}
}
}
 
static void ttm_eu_list_ref_sub(struct list_head *list)
{
struct ttm_validate_buffer *entry;
 
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
 
if (entry->put_count) {
ttm_bo_list_ref_sub(bo, entry->put_count, true);
entry->put_count = 0;
}
}
}
 
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list)
{
67,18 → 93,11
 
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
 
spin_lock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
 
ttm_bo_add_to_lru(bo);
__ttm_bo_unreserve(bo);
}
spin_unlock(&glob->lru_lock);
 
ttm_eu_backoff_reservation_locked(list);
if (ticket)
ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
95,8 → 114,7
*/
 
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
struct list_head *dups)
struct list_head *list)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
105,72 → 123,61
if (list_empty(list))
return 0;
 
list_for_each_entry(entry, list, head) {
entry->reserved = false;
entry->put_count = 0;
entry->removed = false;
}
 
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
 
if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
 
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
 
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
ticket);
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
__ttm_bo_unreserve(bo);
 
ret = -EBUSY;
 
} else if (ret == -EALREADY && dups) {
struct ttm_validate_buffer *safe = entry;
entry = list_prev_entry(entry, head);
list_del(&safe->head);
list_add(&safe->head, dups);
/* already slowpath reserved? */
if (entry->reserved)
continue;
}
 
if (!ret) {
if (!entry->shared)
continue;
ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
ticket);
 
ret = reservation_object_reserve_shared(bo->resv);
if (!ret)
continue;
}
 
if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if
* this succeeds.
*/
ttm_eu_backoff_reservation_reverse(list, entry);
 
if (ret == -EDEADLK && intr) {
BUG_ON(ticket == NULL);
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
ticket);
} else if (ret == -EDEADLK) {
ww_mutex_lock_slow(&bo->resv->lock, ticket);
ret = 0;
}
 
if (!ret && entry->shared)
ret = reservation_object_reserve_shared(bo->resv);
 
if (unlikely(ret != 0)) {
if (ret == -EINTR)
ret = -ERESTARTSYS;
if (ticket) {
ww_acquire_done(ticket);
ww_acquire_fini(ticket);
goto err_fini;
}
return ret;
 
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
ret = -EBUSY;
goto err;
}
goto retry;
} else if (ret)
goto err;
 
/* move this item to the front of the list,
* forces correct iteration of the loop without keeping track
*/
list_del(&entry->head);
list_add(&entry->head, list);
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
ret = -EBUSY;
goto err;
}
}
 
if (ticket)
ww_acquire_done(ticket);
177,12 → 184,25
spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
return 0;
 
err:
spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list);
err_fini:
if (ticket) {
ww_acquire_done(ticket);
ww_acquire_fini(ticket);
}
return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct list_head *list, struct fence *fence)
struct list_head *list, void *sync_obj)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
199,18 → 219,24
glob = bo->glob;
 
spin_lock(&glob->lru_lock);
spin_lock(&bdev->fence_lock);
 
list_for_each_entry(entry, list, head) {
bo = entry->bo;
if (entry->shared)
reservation_object_add_shared_fence(bo->resv, fence);
else
reservation_object_add_excl_fence(bo->resv, fence);
entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
ttm_bo_add_to_lru(bo);
__ttm_bo_unreserve(bo);
entry->reserved = false;
}
spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
if (ticket)
ww_acquire_fini(ticket);
 
list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj)
driver->sync_obj_unref(&entry->old_sync_obj);
}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
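/*
 * Editor's note: an illustrative sketch of the execbuf flow these
 * helpers exist for; "bos" is a pre-filled list of ttm_validate_buffer
 * entries and "new_fence" a fence the submission produced, both
 * hypothetical. The rev 5271 signatures are used here; the rev 5270
 * variants take no intr/dups arguments and a void *sync_obj instead
 * of a struct fence.
 */
static int example_submit(struct ww_acquire_ctx *ticket,
			  struct list_head *bos, struct fence *new_fence)
{
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, bos, true, NULL);
	if (ret)
		return ret;

	/* ... validate the buffers and fire the command stream ... */

	/* publish the fence on every reserved BO and unreserve */
	ttm_eu_fence_buffer_objects(ticket, bos, new_fence);
	return 0;
}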
/drivers/video/drm/ttm/ttm_tt.c
36,11 → 36,11
//#include <linux/highmem.h>
//#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
//#include <linux/file.h>
//#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
//#include <drm/drm_cache.h>
#include <drm/drm_mem_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
/drivers/video/drm/drm_crtc.c
40,12 → 40,102
#include <drm/drm_modeset_lock.h>
 
#include "drm_crtc_internal.h"
#include "drm_internal.h"
 
static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r,
struct drm_file *file_priv);
 
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped with
* drm_modeset_unlock_all.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
 
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (WARN_ON(!ctx))
return;
 
mutex_lock(&config->mutex);
 
drm_modeset_acquire_init(ctx, 0);
 
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_crtcs(dev, ctx);
if (ret)
goto fail;
 
WARN_ON(config->acquire_ctx);
 
/* now we hold the locks, so it is safe to stash the
* ctx for drm_modeset_unlock_all():
*/
config->acquire_ctx = ctx;
 
drm_warn_on_modeset_not_all_locked(dev);
 
return;
 
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
}
EXPORT_SYMBOL(drm_modeset_lock_all);
 
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
*
* This function drop all modeset locks taken by drm_modeset_lock_all.
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
 
if (WARN_ON(!ctx))
return;
 
config->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
 
kfree(ctx);
 
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
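/*
 * Editor's note: an illustrative sketch of the coarse locking these
 * two helpers provide; suspend/resume style code brackets all modeset
 * state access like this. "dev" is a hypothetical device pointer.
 */
static void example_touch_all_crtcs(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* every crtc->mutex is held here, so per-crtc state
		 * can be inspected or changed safely */
	}
	drm_modeset_unlock_all(dev);
}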
 
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
*
* Useful as a debug assert.
*/
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
struct drm_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
 
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
const char *fnname(int val) \
421,6 → 511,9
if (ret)
goto out;
 
/* Grab the idr reference. */
drm_framebuffer_reference(fb);
 
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
out:
430,34 → 523,10
}
EXPORT_SYMBOL(drm_framebuffer_init);
 
/* dev->mode_config.fb_lock must be held! */
static void __drm_framebuffer_unregister(struct drm_device *dev,
struct drm_framebuffer *fb)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
mutex_unlock(&dev->mode_config.idr_mutex);
 
fb->base.id = 0;
}
 
static void drm_framebuffer_free(struct kref *kref)
{
struct drm_framebuffer *fb =
container_of(kref, struct drm_framebuffer, refcount);
struct drm_device *dev = fb->dev;
 
/*
* The lookup idr holds a weak reference, which has not necessarily been
* removed at this point. Check for that.
*/
mutex_lock(&dev->mode_config.fb_lock);
if (fb->base.id) {
/* Mark fb as reaped and drop idr ref. */
__drm_framebuffer_unregister(dev, fb);
}
mutex_unlock(&dev->mode_config.fb_lock);
 
fb->funcs->destroy(fb);
}
 
539,6 → 608,19
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
 
/* dev->mode_config.fb_lock must be held! */
static void __drm_framebuffer_unregister(struct drm_device *dev,
struct drm_framebuffer *fb)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
mutex_unlock(&dev->mode_config.idr_mutex);
 
fb->base.id = 0;
 
__drm_framebuffer_unreference(fb);
}
 
/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
678,10 → 760,14
crtc->funcs = funcs;
crtc->invert_dimensions = false;
 
drm_modeset_lock_all(dev);
drm_modeset_lock_init(&crtc->mutex);
/* dropped by _unlock_all(): */
drm_modeset_lock(&crtc->mutex, config->acquire_ctx);
 
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
return ret;
goto out;
 
crtc->base.properties = &crtc->properties;
 
695,7 → 781,10
if (cursor)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
 
return 0;
out:
drm_modeset_unlock_all(dev);
 
return ret;
}
EXPORT_SYMBOL(drm_crtc_init_with_planes);
 
719,12 → 808,6
drm_mode_object_put(dev, &crtc->base);
list_del(&crtc->head);
dev->mode_config.num_crtc--;
 
WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
if (crtc->state && crtc->funcs->atomic_destroy_state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
 
memset(crtc, 0, sizeof(*crtc));
}
EXPORT_SYMBOL(drm_crtc_cleanup);
 
866,43 → 949,10
connector->name = NULL;
list_del(&connector->head);
dev->mode_config.num_connector--;
 
WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
if (connector->state && connector->funcs->atomic_destroy_state)
connector->funcs->atomic_destroy_state(connector,
connector->state);
 
memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
 
/**
* drm_connector_index - find the index of a registered connector
* @connector: connector to find index for
*
* Given a registered connector, return the index of that connector within a DRM
* device's list of connectors.
*/
unsigned int drm_connector_index(struct drm_connector *connector)
{
unsigned int index = 0;
struct drm_connector *tmp;
struct drm_mode_config *config = &connector->dev->mode_config;
 
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
if (tmp == connector)
return index;
 
index++;
}
 
BUG();
}
EXPORT_SYMBOL(drm_connector_index);
 
/**
* drm_connector_register - register a connector
* @connector: the connector to register
*
1002,8 → 1052,6
list_del(&bridge->head);
dev->mode_config.num_bridge--;
drm_modeset_unlock_all(dev);
 
memset(bridge, 0, sizeof(*bridge));
}
EXPORT_SYMBOL(drm_bridge_cleanup);
 
1070,11 → 1118,10
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
kfree(encoder->name);
encoder->name = NULL;
list_del(&encoder->head);
dev->mode_config.num_encoder--;
drm_modeset_unlock_all(dev);
 
memset(encoder, 0, sizeof(*encoder));
}
EXPORT_SYMBOL(drm_encoder_cleanup);
 
1101,12 → 1148,12
{
int ret;
 
drm_modeset_lock_all(dev);
 
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
return ret;
goto out;
 
drm_modeset_lock_init(&plane->mutex);
 
plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
1115,7 → 1162,8
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
drm_mode_object_put(dev, &plane->base);
return -ENOMEM;
ret = -ENOMEM;
goto out;
}
 
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
1132,7 → 1180,10
dev->mode_config.plane_type_property,
plane->type);
 
return 0;
out:
drm_modeset_unlock_all(dev);
 
return ret;
}
EXPORT_SYMBOL(drm_universal_plane_init);
 
1190,39 → 1241,10
if (plane->type == DRM_PLANE_TYPE_OVERLAY)
dev->mode_config.num_overlay_plane--;
drm_modeset_unlock_all(dev);
 
WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
if (plane->state && plane->funcs->atomic_destroy_state)
plane->funcs->atomic_destroy_state(plane, plane->state);
 
memset(plane, 0, sizeof(*plane));
}
EXPORT_SYMBOL(drm_plane_cleanup);
 
/**
* drm_plane_index - find the index of a registered plane
* @plane: plane to find index for
*
* Given a registered plane, return the index of that plane within a DRM
* device's list of planes.
*/
unsigned int drm_plane_index(struct drm_plane *plane)
{
unsigned int index = 0;
struct drm_plane *tmp;
 
list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
if (tmp == plane)
return index;
 
index++;
}
 
BUG();
}
EXPORT_SYMBOL(drm_plane_index);
 
/**
* drm_plane_force_disable - Forcibly disable a plane
* @plane: plane to disable
*
1233,21 → 1255,19
*/
void drm_plane_force_disable(struct drm_plane *plane)
{
struct drm_framebuffer *old_fb = plane->fb;
int ret;
 
if (!plane->fb)
if (!old_fb)
return;
 
plane->old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane);
if (ret) {
DRM_ERROR("failed to disable plane with busy fb\n");
plane->old_fb = NULL;
return;
}
/* disconnect the plane from the fb and crtc: */
__drm_framebuffer_unreference(plane->old_fb);
plane->old_fb = NULL;
__drm_framebuffer_unreference(old_fb);
plane->fb = NULL;
plane->crtc = NULL;
}
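/*
 * Editor's note: an illustrative sketch, not from this tree. A driver
 * restoring a known-good configuration (e.g. before fbdev takeover)
 * might force-disable every plane like this.
 */
static void example_disable_all_planes(struct drm_device *dev)
{
	struct drm_plane *plane;

	list_for_each_entry(plane, &dev->mode_config.plane_list, head)
		drm_plane_force_disable(plane);
}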
1278,11 → 1298,6
"PATH", 0);
dev->mode_config.path_property = dev_path;
 
dev->mode_config.tile_property = drm_property_create(dev,
DRM_MODE_PROP_BLOB |
DRM_MODE_PROP_IMMUTABLE,
"TILE", 0);
 
return 0;
}
 
1343,13 → 1358,12
* responsible for allocating a list of format names and passing them to
* this routine.
*/
int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int num_modes,
int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
char *modes[])
{
struct drm_property *tv_selector;
struct drm_property *tv_subconnector;
unsigned int i;
int i;
 
if (dev->mode_config.tv_select_subconnector_property)
return 0;
1447,7 → 1461,7
* connectors.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
{
1491,30 → 1505,6
}
EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
 
/**
* drm_mode_create_suggested_offset_properties - create suggested offset properties
* @dev: DRM device
*
* Create the suggested x/y offset properties for connectors.
*/
int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
{
if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
return 0;
 
dev->mode_config.suggested_x_property =
drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
 
dev->mode_config.suggested_y_property =
drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
 
if (dev->mode_config.suggested_x_property == NULL ||
dev->mode_config.suggested_y_property == NULL)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
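/*
 * Editor's note: an illustrative sketch of how a driver might use the
 * properties created above; it assumes they were already created for
 * this device, and the 1024/0 offsets are made-up example values.
 */
static void example_attach_offset_hints(struct drm_connector *connector)
{
	struct drm_mode_config *config = &connector->dev->mode_config;

	drm_object_attach_property(&connector->base,
				   config->suggested_x_property, 1024);
	drm_object_attach_property(&connector->base,
				   config->suggested_y_property, 0);
}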
 
static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
{
uint32_t total_objects = 0;
1631,7 → 1621,7
* the caller.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
static int drm_crtc_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
1676,7 → 1666,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_getresources(struct drm_device *dev, void *data,
struct drm_file *file_priv)
1727,9 → 1717,7
card_res->count_fbs = fb_count;
mutex_unlock(&file_priv->fbs_lock);
 
/* mode_config.mutex protects the connector list against e.g. DP MST
* connector hot-adding. CRTC/Plane lists are invariant. */
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
if (!drm_is_primary_client(file_priv)) {
 
mode_group = NULL;
1849,7 → 1837,7
card_res->count_connectors, card_res->count_encoders);
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
1864,7 → 1852,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_getcrtc(struct drm_device *dev,
void *data, struct drm_file *file_priv)
1871,15 → 1859,19
{
struct drm_mode_crtc *crtc_resp = data;
struct drm_crtc *crtc;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
drm_modeset_lock_all(dev);
 
crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
if (!crtc)
return -ENOENT;
if (!crtc) {
ret = -ENOENT;
goto out;
}
 
drm_modeset_lock_crtc(crtc, crtc->primary);
crtc_resp->x = crtc->x;
crtc_resp->y = crtc->y;
crtc_resp->gamma_size = crtc->gamma_size;
1896,9 → 1888,10
} else {
crtc_resp->mode_valid = 0;
}
drm_modeset_unlock_crtc(crtc);
 
return 0;
out:
drm_modeset_unlock_all(dev);
return ret;
}
 
static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
1914,15 → 1907,6
return true;
}
 
static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
{
/* For atomic drivers only state objects are synchronously updated and
* protected by modeset locks, so check those first. */
if (connector->state)
return connector->state->best_encoder;
return connector->encoder;
}
 
/**
* drm_mode_getconnector - get connector configuration
* @dev: drm device for the ioctl
1934,7 → 1918,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_getconnector(struct drm_device *dev, void *data,
struct drm_file *file_priv)
1941,7 → 1925,6
{
struct drm_mode_get_connector *out_resp = data;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *mode;
int mode_count = 0;
int props_count = 0;
1997,10 → 1980,8
out_resp->subpixel = connector->display_info.subpixel_order;
out_resp->connection = connector->status;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
 
encoder = drm_connector_get_encoder(connector);
if (encoder)
out_resp->encoder_id = encoder->base.id;
if (connector->encoder)
out_resp->encoder_id = connector->encoder->base.id;
else
out_resp->encoder_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
2070,33 → 2051,6
return ret;
}
 
static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
bool uses_atomic = false;
 
/* For atomic drivers only state objects are synchronously updated and
* protected by modeset locks, so check those first. */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->state)
continue;
 
uses_atomic = true;
 
if (connector->state->best_encoder != encoder)
continue;
 
return connector->state->crtc;
}
 
/* Don't return stale data (e.g. pending async disable). */
if (uses_atomic)
return NULL;
 
return encoder->crtc;
}
 
/**
* drm_mode_getencoder - get encoder configuration
* @dev: drm device for the ioctl
2108,7 → 2062,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_getencoder(struct drm_device *dev, void *data,
struct drm_file *file_priv)
2115,31 → 2069,30
{
struct drm_mode_get_encoder *enc_resp = data;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
drm_modeset_lock_all(dev);
encoder = drm_encoder_find(dev, enc_resp->encoder_id);
if (!encoder)
return -ENOENT;
if (!encoder) {
ret = -ENOENT;
goto out;
}
 
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
crtc = drm_encoder_get_crtc(encoder);
if (crtc)
enc_resp->crtc_id = crtc->base.id;
else if (encoder->crtc)
if (encoder->crtc)
enc_resp->crtc_id = encoder->crtc->base.id;
else
enc_resp->crtc_id = 0;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
 
enc_resp->encoder_type = encoder->encoder_type;
enc_resp->encoder_id = encoder->base.id;
enc_resp->possible_crtcs = encoder->possible_crtcs;
enc_resp->possible_clones = encoder->possible_clones;
 
return 0;
out:
drm_modeset_unlock_all(dev);
return ret;
}
 
/**
2153,7 → 2106,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_file *file_priv)
2162,12 → 2115,13
struct drm_mode_config *config;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
int copied = 0;
int copied = 0, ret = 0;
unsigned num_planes;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
drm_modeset_lock_all(dev);
config = &dev->mode_config;
 
if (file_priv->universal_planes)
2183,7 → 2137,6
(plane_resp->count_planes >= num_planes)) {
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
/* Plane lists are invariant, no locking needed. */
list_for_each_entry(plane, &config->plane_list, head) {
/*
* Unless userspace set the 'universal planes'
2193,14 → 2146,18
!file_priv->universal_planes)
continue;
 
if (put_user(plane->base.id, plane_ptr + copied))
return -EFAULT;
if (put_user(plane->base.id, plane_ptr + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
plane_resp->count_planes = num_planes;
 
return 0;
out:
drm_modeset_unlock_all(dev);
return ret;
}
 
/**
2214,7 → 2171,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
2222,15 → 2179,18
struct drm_mode_get_plane *plane_resp = data;
struct drm_plane *plane;
uint32_t __user *format_ptr;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, plane_resp->plane_id);
if (!plane)
return -ENOENT;
if (!plane) {
ret = -ENOENT;
goto out;
}
 
drm_modeset_lock(&plane->mutex, NULL);
if (plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
else
2240,7 → 2200,6
plane_resp->fb_id = plane->fb->base.id;
else
plane_resp->fb_id = 0;
drm_modeset_unlock(&plane->mutex);
 
plane_resp->plane_id = plane->base.id;
plane_resp->possible_crtcs = plane->possible_crtcs;
2256,12 → 2215,15
if (copy_to_user(format_ptr,
plane->format_types,
sizeof(uint32_t) * plane->format_count)) {
return -EFAULT;
ret = -EFAULT;
goto out;
}
}
plane_resp->count_format_types = plane->format_count;
 
return 0;
out:
drm_modeset_unlock_all(dev);
return ret;
}
 
/*
2273,7 → 2235,7
*
* src_{x,y,w,h} are provided in 16.16 fixed point format
*/
static int __setplane_internal(struct drm_plane *plane,
static int setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
2282,20 → 2244,24
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
struct drm_framebuffer *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
unsigned int i;
int i;
 
/* No fb means shut it down */
if (!fb) {
plane->old_fb = plane->fb;
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane);
if (!ret) {
plane->crtc = NULL;
plane->fb = NULL;
} else {
plane->old_fb = NULL;
old_fb = NULL;
}
drm_modeset_unlock_all(dev);
goto out;
}
 
2335,7 → 2301,8
goto out;
}
 
plane->old_fb = plane->fb;
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
2344,37 → 2311,18
plane->fb = fb;
fb = NULL;
} else {
plane->old_fb = NULL;
old_fb = NULL;
}
drm_modeset_unlock_all(dev);
 
out:
if (fb)
drm_framebuffer_unreference(fb);
if (plane->old_fb)
drm_framebuffer_unreference(plane->old_fb);
plane->old_fb = NULL;
if (old_fb)
drm_framebuffer_unreference(old_fb);
 
return ret;
}
 
static int setplane_internal(struct drm_plane *plane,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int32_t crtc_x, int32_t crtc_y,
uint32_t crtc_w, uint32_t crtc_h,
/* src_{x,y,w,h} values are 16.16 fixed point */
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
int ret;
 
drm_modeset_lock_all(plane->dev);
ret = __setplane_internal(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
drm_modeset_unlock_all(plane->dev);
 
return ret;
}
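
/*
 * Editor's note: a worked example of the 16.16 fixed point convention
 * used by the src_{x,y,w,h} arguments above (1920x1080 is a made-up
 * source size). Pixel value N is encoded as N << 16; the low 16 bits
 * carry the fractional part for scaled or panned sources:
 *
 *	src_x = 0 << 16;	src_y = 0 << 16;
 *	src_w = 1920 << 16;	src_h = 1080 << 16;
 */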
 
/**
2388,12 → 2336,13
* valid crtc).
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb = NULL;
2416,12 → 2365,14
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
plane = drm_plane_find(dev, plane_req->plane_id);
if (!plane) {
obj = drm_mode_object_find(dev, plane_req->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
return -ENOENT;
}
plane = obj_to_plane(obj);
 
if (plane_req->fb_id) {
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
2431,12 → 2382,14
return -ENOENT;
}
 
crtc = drm_crtc_find(dev, plane_req->crtc_id);
if (!crtc) {
obj = drm_mode_object_find(dev, plane_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
return -ENOENT;
}
crtc = obj_to_crtc(obj);
}
 
/*
2459,7 → 2412,7
* interface. The only thing it adds is correct refcounting dance.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
2474,7 → 2427,7
* crtcs. Atomic modeset will have saner semantics ...
*/
list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
tmp->primary->old_fb = tmp->primary->fb;
tmp->old_fb = tmp->primary->fb;
 
fb = set->fb;
 
2552,7 → 2505,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
2721,12 → 2674,10
* If this crtc has a universal cursor plane, call that plane's update
* handler rather than using legacy cursor handlers.
*/
drm_modeset_lock_crtc(crtc, crtc->cursor);
if (crtc->cursor) {
ret = drm_mode_cursor_universal(crtc, req, file_priv);
goto out;
}
if (crtc->cursor)
return drm_mode_cursor_universal(crtc, req, file_priv);
 
drm_modeset_lock(&crtc->mutex, NULL);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
ret = -ENXIO;
2750,7 → 2701,7
}
}
out:
drm_modeset_unlock_crtc(crtc);
drm_modeset_unlock(&crtc->mutex);
 
return ret;
 
2768,7 → 2719,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_cursor_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
2795,7 → 2746,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_cursor2_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
2855,12 → 2806,12
* @file_priv: drm file for the ioctl call
*
* Add a new FB to the specified CRTC, given a user request. This is the
* original addfb ioctl which only supported RGB formats.
* original addfb ioclt which only supported RGB formats.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
2867,9 → 2818,11
{
struct drm_mode_fb_cmd *or = data;
struct drm_mode_fb_cmd2 r = {};
int ret;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret = 0;
 
/* convert to new format and call new ioctl */
/* Use new struct with format internally */
r.fb_id = or->fb_id;
r.width = or->width;
r.height = or->height;
2877,15 → 2830,30
r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
r.handles[0] = or->handle;
 
ret = drm_mode_addfb2(dev, &r, file_priv);
if (ret)
return ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
or->fb_id = r.fb_id;
if ((config->min_width > r.width) || (r.width > config->max_width))
return -EINVAL;
 
return 0;
if ((config->min_height > r.height) || (r.height > config->max_height))
return -EINVAL;
 
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
return PTR_ERR(fb);
}
 
mutex_lock(&file_priv->fbs_lock);
or->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
mutex_unlock(&file_priv->fbs_lock);
 
return ret;
}
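
/*
 * Editor's note: an illustrative sketch of the legacy-to-fb_cmd2
 * conversion performed above; bpp=32/depth=24 maps to XRGB8888, and
 * the sizes and handle below are made-up example values.
 */
static void example_fill_fb_cmd2(struct drm_mode_fb_cmd2 *r, u32 handle)
{
	r->width = 1920;
	r->height = 1080;
	r->pixel_format = drm_mode_legacy_fb_format(32, 24);
	r->pitches[0] = r->width * 4;	/* XRGB8888: 4 bytes/pixel */
	r->handles[0] = handle;
}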
 
static int format_check(const struct drm_mode_fb_cmd2 *r)
{
uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
2975,7 → 2943,7
num_planes = drm_format_num_planes(r->pixel_format);
 
if (r->width == 0 || r->width % hsub) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
return -EINVAL;
}
 
3065,7 → 3033,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
3093,7 → 3061,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
3147,7 → 3115,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
3169,8 → 3137,7
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle) {
if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
drm_is_control_client(file_priv)) {
if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
ret = fb->funcs->create_handle(fb, file_priv,
&r->handle);
} else {
3208,7 → 3175,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
3288,7 → 3255,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
void drm_fb_release(struct drm_file *priv)
{
3295,16 → 3262,7
struct drm_device *dev = priv->minor->dev;
struct drm_framebuffer *fb, *tfb;
 
/*
* When the file gets released that means no one else can access the fb
* list any more, so no need to grab fpriv->fbs_lock. And we need to
* avoid upsetting lockdep since the universal cursor code adds a
* framebuffer while holding mutex locks.
*
* Note that a real deadlock between fpriv->fbs_lock and the modeset
* locks is impossible here since no one else but this function can get
* at it any more.
*/
mutex_lock(&priv->fbs_lock);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
 
mutex_lock(&dev->mode_config.fb_lock);
3317,6 → 3275,7
/* This will also drop the fpriv->fbs reference. */
drm_framebuffer_remove(fb);
}
mutex_unlock(&priv->fbs_lock);
}
#endif
 
3332,10 → 3291,6
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* Note that the DRM core keeps a per-device list of properties and that, if
* drm_mode_config_cleanup() is called, it will destroy all properties created
* by the driver.
*
* Returns:
* A pointer to the newly created property on success, NULL on failure.
*/
3363,7 → 3318,7
 
property->flags = flags;
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_list);
INIT_LIST_HEAD(&property->enum_blob_list);
 
if (name) {
strncpy(property->name, name, DRM_PROP_NAME_LEN);
3434,10 → 3389,9
* @flags: flags specifying the property type
* @name: name of the property
* @props: enumeration lists with property bitflags
* @num_props: size of the @props array
* @supported_bits: bitmask of all supported enumeration values
* @num_values: number of pre-defined values
*
* This creates a new bitmask drm property which can then be attached to a drm
* This creates a new generic drm property which can then be attached to a drm
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
3512,7 → 3466,7
* object with drm_object_attach_property. The returned property object must be
* freed with drm_property_destroy.
*
* Userspace is allowed to set any integer value in the (min, max) range
* Userspace is allowed to set any interger value in the (min, max) range
* inclusive.
*
* Returns:
3585,8 → 3539,8
(value > 63))
return -EINVAL;
 
if (!list_empty(&property->enum_list)) {
list_for_each_entry(prop_enum, &property->enum_list, head) {
if (!list_empty(&property->enum_blob_list)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
if (prop_enum->value == value) {
strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
3604,7 → 3558,7
prop_enum->value = value;
 
property->values[index] = value;
list_add_tail(&prop_enum->head, &property->enum_list);
list_add_tail(&prop_enum->head, &property->enum_blob_list);
return 0;
}
EXPORT_SYMBOL(drm_property_add_enum);
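/*
 * Editor's note: an illustrative sketch of building an enum property
 * with drm_property_create() and drm_property_add_enum(); the name
 * "example scaling" and its values are invented for the example.
 */
static struct drm_property *example_create_scaling_prop(struct drm_device *dev)
{
	struct drm_property *prop;

	prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
				   "example scaling", 2);
	if (!prop)
		return NULL;

	drm_property_add_enum(prop, 0, 0, "off");
	drm_property_add_enum(prop, 1, 1, "full");
	return prop;
}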
3621,7 → 3575,7
{
struct drm_property_enum *prop_enum, *pt;
 
list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) {
list_del(&prop_enum->head);
kfree(prop_enum);
}
3725,20 → 3679,17
 
#if 0
/**
* drm_mode_getproperty_ioctl - get the property metadata
* drm_mode_getproperty_ioctl - get the current value of a connector's property
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* This function retrieves the metadata for a given property, like the different
* possible values for an enum property or the limits for a range property.
* This function retrieves the current value for an connectors's property.
*
* Blob properties are special: their values are read via the get_blob ioctl.
*
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
3746,12 → 3697,16
struct drm_mode_get_property *out_resp = data;
struct drm_property *property;
int enum_count = 0;
int blob_count = 0;
int value_count = 0;
int ret = 0, i;
int copied;
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
struct drm_property_blob *prop_blob;
uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
3765,8 → 3720,11
 
if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_list, head)
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
list_for_each_entry(prop_blob, &property->enum_blob_list, head)
blob_count++;
}
 
value_count = property->num_values;
3791,7 → 3749,7
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
list_for_each_entry(prop_enum, &property->enum_list, head) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
 
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
ret = -EFAULT;
3809,16 → 3767,28
out_resp->count_enum_blobs = enum_count;
}
 
/*
* NOTE: The idea seems to have been to use this to read all the blob
* property values. But nothing ever added them to the corresponding
* list, userspace always used the special-purpose get_blob ioctl to
* read the value for a blob property. It also doesn't make a lot of
* sense to return values here when everything else is just metadata for
* the property itself.
*/
if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
out_resp->count_enum_blobs = 0;
if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
 
list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
ret = -EFAULT;
goto done;
}
 
if (put_user(prop_blob->length, blob_length_ptr + copied)) {
ret = -EFAULT;
goto done;
}
 
copied++;
}
}
out_resp->count_enum_blobs = blob_count;
}
done:
drm_modeset_unlock_all(dev);
return ret;
3825,9 → 3795,8
}
#endif
 
static struct drm_property_blob *
drm_property_create_blob(struct drm_device *dev, size_t length,
const void *data)
static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length,
void *data)
{
struct drm_property_blob *blob;
int ret;
3874,7 → 3843,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
3909,25 → 3878,12
}
#endif
 
/**
* drm_mode_connector_set_path_property - set path property on connector
* @connector: connector to set property on.
* @path: path to use for property.
*
* This creates a property to expose to userspace to specify a
* connector path. This is mainly used for DisplayPort MST where
* connectors have a topology and we want to allow userspace to give
* them more meaningful names.
*
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_mode_connector_set_path_property(struct drm_connector *connector,
const char *path)
char *path)
{
struct drm_device *dev = connector->dev;
size_t size = strlen(path) + 1;
int ret;
int ret, size;
size = strlen(path) + 1;
 
connector->path_blob_ptr = drm_property_create_blob(connector->dev,
size, path);
3942,52 → 3898,6
EXPORT_SYMBOL(drm_mode_connector_set_path_property);
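/*
 * Editor's note: for DisplayPort MST the path blob is built by the
 * topology code from the branch port numbers, giving strings of the
 * form "mst:<base connector id>-<port>-<port>...", e.g. "mst:70-1-8"
 * (made-up IDs). The helper above only wraps the string into a blob
 * and points the PATH property at it.
 */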
 
/**
* drm_mode_connector_set_tile_property - set tile property on connector
* @connector: connector to set property on.
*
* This looks up the tile information for a connector, and creates a
* property for userspace to parse if it exists. The property is of
* the form of 8 integers using ':' as a separator.
*
* Returns:
* Zero on success, errno on failure.
*/
int drm_mode_connector_set_tile_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
int ret, size;
char tile[256];
 
if (connector->tile_blob_ptr)
drm_property_destroy_blob(dev, connector->tile_blob_ptr);
 
if (!connector->has_tile) {
connector->tile_blob_ptr = NULL;
ret = drm_object_property_set_value(&connector->base,
dev->mode_config.tile_property, 0);
return ret;
}
 
snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
connector->tile_group->id, connector->tile_is_single_monitor,
connector->num_h_tile, connector->num_v_tile,
connector->tile_h_loc, connector->tile_v_loc,
connector->tile_h_size, connector->tile_v_size);
size = strlen(tile) + 1;
 
connector->tile_blob_ptr = drm_property_create_blob(connector->dev,
size, tile);
if (!connector->tile_blob_ptr)
return -EINVAL;
 
ret = drm_object_property_set_value(&connector->base,
dev->mode_config.tile_property,
connector->tile_blob_ptr->base.id);
return ret;
}
EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
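/*
 * Editor's note: a worked example of the 8-integer format emitted by
 * the snprintf() above, with made-up values. The left half of a
 * 3840x2160 monitor built from two 1920x2160 tiles would publish
 *
 *	"1:1:2:1:0:0:1920:2160"
 *
 * i.e. tile group id 1, a single-monitor tile group, 2x1 tiles, this
 * tile at h/v location (0,0), each tile 1920x2160 pixels.
 */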
 
/**
* drm_mode_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
* @edid: new value of the edid property
3999,11 → 3909,10
* Zero on success, errno on failure.
*/
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid)
struct edid *edid)
{
struct drm_device *dev = connector->dev;
size_t size;
int ret;
int ret, size;
 
/* ignore requests to set edid when overridden */
if (connector->override_edid)
4033,8 → 3942,8
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
#if 0
 
 
static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
4071,25 → 3980,12
return ret;
}
 
/**
* drm_mode_plane_set_obj_prop - set the value of a property
* @plane: drm plane object to set property value for
* @property: property to set
* @value: value the property should be set to
*
* This functions sets a given property on a given plane object. This function
* calls the driver's ->set_property callback and changes the software state of
* the property if the callback succeeds.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_mode_object *obj = &plane->base;
struct drm_plane *plane = obj_to_plane(obj);
 
if (plane->funcs->set_property)
ret = plane->funcs->set_property(plane, property, value);
4098,11 → 3994,9
 
return ret;
}
EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
 
#if 0
/**
* drm_mode_obj_get_properties_ioctl - get the current value of a object's property
* drm_mode_getproperty_ioctl - get the current value of a object's property
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
4114,7 → 4008,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
4186,7 → 4080,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
4238,8 → 4132,7
ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
break;
case DRM_MODE_OBJECT_PLANE:
ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
property, arg->value);
ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
break;
}
 
4259,7 → 4152,7
* possible_clones and possible_crtcs bitmasks.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder)
4286,7 → 4179,7
* fixed gamma table size.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
4316,7 → 4209,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
4388,7 → 4281,7
* Called by the user via ioctl.
*
* Returns:
* Zero on success, negative errno on failure.
* Zero on success, errno on failure.
*/
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
4452,14 → 4345,9
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_encoder *encoder;
struct drm_connector *connector;
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head)
if (plane->funcs->reset)
plane->funcs->reset(plane);
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
if (crtc->funcs->reset)
crtc->funcs->reset(crtc);
4688,36 → 4576,6
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
 
/**
* drm_rotation_simplify() - Try to simplify the rotation
* @rotation: Rotation to be simplified
* @supported_rotations: Supported rotations
*
* Attempt to simplify the rotation to a form that is supported.
* Eg. if the hardware supports everything except DRM_REFLECT_X
* one could call this function like this:
*
* drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
* BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
* BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
*
* to eliminate the DRM_REFLECT_X flag. Depending on what kind of
* transforms the hardware supports, this function may not
* be able to produce a supported transform, so the caller should
* check the result afterwards.
*/
unsigned int drm_rotation_simplify(unsigned int rotation,
unsigned int supported_rotations)
{
if (rotation & ~supported_rotations) {
rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
}
 
return rotation;
}
EXPORT_SYMBOL(drm_rotation_simplify);
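/*
 * Editor's note: a minimal usage sketch for drm_rotation_simplify() based on
 * the kernel-doc above. The hardware mask and the DRM_ROTATE_0 fallback are
 * illustrative assumptions, not taken from this driver.
 */
static unsigned int example_pick_rotation(unsigned int requested)
{
	/* assume hardware that supports everything except DRM_REFLECT_X */
	const unsigned int supported = BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
				       BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
				       BIT(DRM_REFLECT_Y);
	unsigned int rotation = drm_rotation_simplify(requested, supported);

	/* the helper may still fail, so check the result as the doc advises */
	if (rotation & ~supported)
		rotation = BIT(DRM_ROTATE_0);

	return rotation;
}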
 
/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
4744,7 → 4602,6
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
idr_init(&dev->mode_config.tile_idr);
 
drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
4774,181 → 4631,3
*
* FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
struct drm_connector *connector, *ot;
struct drm_crtc *crtc, *ct;
struct drm_encoder *encoder, *enct;
struct drm_bridge *bridge, *brt;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
struct drm_property_blob *blob, *bt;
struct drm_plane *plane, *plt;
 
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
encoder->funcs->destroy(encoder);
}
 
list_for_each_entry_safe(bridge, brt,
&dev->mode_config.bridge_list, head) {
bridge->funcs->destroy(bridge);
}
 
list_for_each_entry_safe(connector, ot,
&dev->mode_config.connector_list, head) {
connector->funcs->destroy(connector);
}
 
list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
head) {
drm_property_destroy(dev, property);
}
 
list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
head) {
drm_property_destroy_blob(dev, blob);
}
 
/*
* Single-threaded teardown context, so it's not required to grab the
* fb_lock to protect against concurrent fb_list access. On the contrary, it
* would actually deadlock with the drm_framebuffer_cleanup function.
*
* Also, if there are any framebuffers left, that's a driver leak now,
* so politely WARN about this.
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
drm_framebuffer_remove(fb);
}
 
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
head) {
plane->funcs->destroy(plane);
}
 
list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
crtc->funcs->destroy(crtc);
}
 
idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.crtc_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
 
struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
unsigned int supported_rotations)
{
static const struct drm_prop_enum_list props[] = {
{ DRM_ROTATE_0, "rotate-0" },
{ DRM_ROTATE_90, "rotate-90" },
{ DRM_ROTATE_180, "rotate-180" },
{ DRM_ROTATE_270, "rotate-270" },
{ DRM_REFLECT_X, "reflect-x" },
{ DRM_REFLECT_Y, "reflect-y" },
};
 
return drm_property_create_bitmask(dev, 0, "rotation",
props, ARRAY_SIZE(props),
supported_rotations);
}
EXPORT_SYMBOL(drm_mode_create_rotation_property);
 
/**
* DOC: Tile group
*
* Tile groups are used to represent tiled monitors with a unique
* integer identifier. Tiled monitors using DisplayID v1.3 have
* a unique 8-byte handle; we store this in a tile group so we
* have a common identifier for all tiles in a monitor group.
*/
static void drm_tile_group_free(struct kref *kref)
{
struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
struct drm_device *dev = tg->dev;
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.tile_idr, tg->id);
mutex_unlock(&dev->mode_config.idr_mutex);
kfree(tg);
}
 
/**
* drm_mode_put_tile_group - drop a reference to a tile group.
* @dev: DRM device
* @tg: tile group to drop reference to.
*
* Drop a reference to the tile group and free it when the refcount reaches zero.
*/
void drm_mode_put_tile_group(struct drm_device *dev,
struct drm_tile_group *tg)
{
kref_put(&tg->refcount, drm_tile_group_free);
}
 
/**
* drm_mode_get_tile_group - get a reference to an existing tile group
* @dev: DRM device
* @topology: 8-byte topology identifier, unique per monitor.
*
* Use the unique bytes to get a reference to an existing tile group.
*
* RETURNS:
* tile group or NULL if not found.
*/
struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
char topology[8])
{
struct drm_tile_group *tg;
int id;
mutex_lock(&dev->mode_config.idr_mutex);
idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
if (!memcmp(tg->group_data, topology, 8)) {
// if (!kref_get_unless_zero(&tg->refcount))
// tg = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
return tg;
}
}
mutex_unlock(&dev->mode_config.idr_mutex);
return NULL;
}
 
/**
* drm_mode_create_tile_group - create a tile group from a displayid description
* @dev: DRM device
* @topology: 8-byte topology identifier, unique per monitor.
*
* Create a tile group for the unique monitor, and get a unique
* identifier for the tile group.
*
* RETURNS:
* new tile group or error.
*/
struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
char topology[8])
{
struct drm_tile_group *tg;
int ret;
 
tg = kzalloc(sizeof(*tg), GFP_KERNEL);
if (!tg)
return ERR_PTR(-ENOMEM);
 
kref_init(&tg->refcount);
memcpy(tg->group_data, topology, 8);
tg->dev = dev;
 
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
if (ret >= 0) {
tg->id = ret;
} else {
kfree(tg);
tg = ERR_PTR(ret);
}
 
mutex_unlock(&dev->mode_config.idr_mutex);
return tg;
}
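/*
 * Editor's note: a hedged sketch of the get-or-create pattern these tile
 * group helpers are meant for, mirroring the DisplayID parser in drm_edid.c.
 * "topology" would come from a DisplayID tiled-display block.
 */
static struct drm_tile_group *
example_lookup_tile_group(struct drm_device *dev, char topology[8])
{
	struct drm_tile_group *tg;

	tg = drm_mode_get_tile_group(dev, topology);
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology);

	/* the caller now owns one reference; release it later with
	 * drm_mode_put_tile_group() (note create returns ERR_PTR on failure) */
	return tg;
}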
/drivers/video/drm/drm_crtc_helper.c
34,35 → 34,12
#include <linux/moduleparam.h>
 
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
 
/**
* DOC: overview
*
* The CRTC modeset helper library provides a default set_config implementation
* in drm_crtc_helper_set_config(), plus a few other convenience functions built
* on the same callbacks which drivers can use to e.g. restore the modeset
* configuration on resume with drm_helper_resume_force_mode().
*
* The driver callbacks are mostly compatible with the atomic modeset helpers,
* except for the handling of the primary plane: Atomic helpers require that the
* primary plane is implemented as a real standalone plane and not directly tied
* to the CRTC state. For easier transition this library provides functions to
* implement the old semantics required by the CRTC helpers using the new plane
* and atomic helper callbacks.
*
* Drivers are strongly urged to convert to the atomic helpers (by way of first
* converting to the plane helpers). New drivers must not use these functions
* but need to implement the atomic interface instead, potentially using the
* atomic helpers for that.
*/
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
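/*
 * Editor's note: an illustrative wiring of the transitional helpers described
 * in the overview above. Only mode_set_nofb is driver-specific here; the two
 * helper callbacks are provided later in this file. A sketch, not a complete
 * driver.
 */
static void example_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	/* program the CRTC timings from crtc->state->adjusted_mode here */
}

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.mode_set_nofb = example_crtc_mode_set_nofb,
	/* transitional helpers implementing the old ->mode_set semantics */
	.mode_set = drm_helper_crtc_mode_set,
	.mode_set_base = drm_helper_crtc_mode_set_base,
};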
894,112 → 871,3
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
 
/**
* drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
* @crtc: DRM CRTC
* @mode: DRM display mode which userspace requested
* @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
* @x: x offset of the CRTC scanout area on the underlying framebuffer
* @y: y offset of the CRTC scanout area on the underlying framebuffer
* @old_fb: previous framebuffer
*
* This function implements a callback usable as the ->mode_set callback
* required by the crtc helpers. Besides the atomic plane helper functions for
* the primary plane the driver must also provide the ->mode_set_nofb callback
* to set up the crtc.
*
* This is a transitional helper useful for converting drivers to the atomic
* interfaces.
*/
int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_crtc_state *crtc_state;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int ret;
 
if (crtc->funcs->atomic_duplicate_state)
crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
else if (crtc->state)
crtc_state = kmemdup(crtc->state, sizeof(*crtc_state),
GFP_KERNEL);
else
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
return -ENOMEM;
 
crtc_state->enable = true;
crtc_state->planes_changed = true;
crtc_state->mode_changed = true;
drm_mode_copy(&crtc_state->mode, mode);
drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
 
if (crtc_funcs->atomic_check) {
ret = crtc_funcs->atomic_check(crtc, crtc_state);
if (ret) {
kfree(crtc_state);
 
return ret;
}
}
 
swap(crtc->state, crtc_state);
 
crtc_funcs->mode_set_nofb(crtc);
 
if (crtc_state) {
if (crtc->funcs->atomic_destroy_state)
crtc->funcs->atomic_destroy_state(crtc, crtc_state);
else
kfree(crtc_state);
}
 
return drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
}
EXPORT_SYMBOL(drm_helper_crtc_mode_set);
 
/**
* drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
* @crtc: DRM CRTC
* @x: x offset of the CRTC scanout area on the underlying framebuffer
* @y: y offset of the CRTC scanout area on the underlying framebuffer
* @old_fb: previous framebuffer
*
* This function implements a callback usable as the ->mode_set_base callback
* required by the crtc helpers. The driver must provide the atomic plane helper
* functions for the primary plane.
*
* This is a transitional helper useful for converting drivers to the atomic
* interfaces.
*/
int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_plane_state *plane_state;
struct drm_plane *plane = crtc->primary;
 
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
else if (plane->state)
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
else
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
 
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
plane_state->crtc_x = 0;
plane_state->crtc_y = 0;
plane_state->crtc_h = crtc->mode.vdisplay;
plane_state->crtc_w = crtc->mode.hdisplay;
plane_state->src_x = x << 16;
plane_state->src_y = y << 16;
plane_state->src_h = crtc->mode.vdisplay << 16;
plane_state->src_w = crtc->mode.hdisplay << 16;
 
return drm_plane_helper_commit(plane, plane_state, old_fb);
}
EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
/drivers/video/drm/drm_dp_helper.c
27,8 → 27,8
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
 
/**
* DOC: dp helpers
39,6 → 39,198
* blocks, ...
*/
 
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
 
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
}
 
/*
* I2C over AUX CH
*/
 
/*
* Send the address. If the I2C link is running, this 'restarts'
* the connection with the new address; this is used for doing
* a write followed by a read (as needed for DDC)
*/
static int
i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int mode = MODE_I2C_START;
int ret;
 
if (reading)
mode |= MODE_I2C_READ;
else
mode |= MODE_I2C_WRITE;
algo_data->address = address;
algo_data->running = true;
ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
return ret;
}
 
/*
* Stop the I2C transaction. This closes out the link, sending
* a bare address packet with the MOT bit turned off
*/
static void
i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int mode = MODE_I2C_STOP;
 
if (reading)
mode |= MODE_I2C_READ;
else
mode |= MODE_I2C_WRITE;
if (algo_data->running) {
(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
algo_data->running = false;
}
}
 
/*
* Write a single byte to the current I2C address; the
* I2C link must be running or this returns -EIO
*/
static int
i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
 
if (!algo_data->running)
return -EIO;
 
ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
return ret;
}
 
/*
* Read a single byte from the current I2C address; the
* I2C link must be running or this returns -EIO
*/
static int
i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
 
if (!algo_data->running)
return -EIO;
 
ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
return ret;
}
 
static int
i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
{
int ret = 0;
bool reading = false;
int m;
int b;
 
for (m = 0; m < num; m++) {
u16 len = msgs[m].len;
u8 *buf = msgs[m].buf;
reading = (msgs[m].flags & I2C_M_RD) != 0;
ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
if (ret < 0)
break;
if (reading) {
for (b = 0; b < len; b++) {
ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
if (ret < 0)
break;
}
} else {
for (b = 0; b < len; b++) {
ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
if (ret < 0)
break;
}
}
if (ret < 0)
break;
}
if (ret >= 0)
ret = num;
i2c_algo_dp_aux_stop(adapter, reading);
DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
return ret;
}
 
static u32
i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR;
}
 
static const struct i2c_algorithm i2c_dp_aux_algo = {
.master_xfer = i2c_algo_dp_aux_xfer,
.functionality = i2c_algo_dp_aux_functionality,
};
 
static void
i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
}
 
static int
i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
{
adapter->algo = &i2c_dp_aux_algo;
adapter->retries = 3;
i2c_dp_aux_reset_bus(adapter);
return 0;
}
 
/**
* i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
* @adapter: i2c adapter to register
*
* This registers an i2c adapter that uses the dp aux channel as its underlying
* transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
* and store it in the algo_data member of the @adapter argument. This will be
* used by the i2c over dp aux algorithm to drive the hardware.
*
* RETURNS:
* 0 on success, -ERRNO on failure.
*
* IMPORTANT:
* This interface is deprecated, please switch to the new dp aux helpers and
* drm_dp_aux_register().
*/
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
 
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
error = i2c_add_adapter(adapter);
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
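/*
 * Editor's note: a minimal driver-side sketch for the deprecated helper
 * above. The aux_ch body is a placeholder; a real driver would perform the
 * actual hardware AUX transaction there.
 */
static int example_aux_ch(struct i2c_adapter *adapter, int mode,
			  uint8_t write_byte, uint8_t *read_byte)
{
	/* issue one AUX transaction and return a negative errno on failure */
	return 0;
}

static struct i2c_algo_dp_aux_data example_aux_data = {
	.aux_ch = example_aux_ch,
};

static int example_register_dp_i2c(struct i2c_adapter *adapter)
{
	adapter->algo_data = &example_aux_data;
	return i2c_dp_aux_add_bus(adapter);
}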
 
/* Helpers for DP link training */
static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
186,11 → 378,10
 
/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
* aux i2c transactions, but for real world devices this wasn't
* sufficient, so bump to 32, which makes Dell 4k monitors happier.
* retry native transactions, so retry 7 times like for I2C-over-AUX
* transactions.
*/
for (retry = 0; retry < 32; retry++) {
for (retry = 0; retry < 7; retry++) {
 
mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
/drivers/video/drm/drm_dp_mst_topology.c
689,7 → 689,7
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_vcpi *vcpi)
{
int ret, vcpi_ret;
int ret;
 
mutex_lock(&mgr->payload_lock);
ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
699,16 → 699,8
goto out_unlock;
}
 
vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
if (vcpi_ret > mgr->max_payloads) {
ret = -EINVAL;
DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
goto out_unlock;
}
 
set_bit(ret, &mgr->payload_mask);
set_bit(vcpi_ret, &mgr->vcpi_mask);
vcpi->vcpi = vcpi_ret + 1;
vcpi->vcpi = ret;
mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
mutex_unlock(&mgr->payload_lock);
716,23 → 708,15
}
 
static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
int vcpi)
int id)
{
int i;
if (vcpi == 0)
if (id == 0)
return;
 
mutex_lock(&mgr->payload_lock);
DRM_DEBUG_KMS("putting payload %d\n", vcpi);
clear_bit(vcpi - 1, &mgr->vcpi_mask);
 
for (i = 0; i < mgr->max_payloads; i++) {
if (mgr->proposed_vcpis[i])
if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
mgr->proposed_vcpis[i] = NULL;
clear_bit(i + 1, &mgr->payload_mask);
}
}
DRM_DEBUG_KMS("putting payload %d\n", id);
clear_bit(id, &mgr->payload_mask);
mgr->proposed_vcpis[id - 1] = NULL;
mutex_unlock(&mgr->payload_lock);
}
 
846,8 → 830,6
 
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
struct drm_dp_mst_branch *mstb;
 
switch (old_pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
855,9 → 837,8
drm_dp_mst_unregister_i2c_bus(&port->aux);
break;
case DP_PEER_DEVICE_MST_BRANCHING:
mstb = port->mstb;
drm_dp_put_mst_branch_device(port->mstb);
port->mstb = NULL;
drm_dp_put_mst_branch_device(mstb);
break;
}
}
868,8 → 849,6
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
if (!port->input) {
port->vcpi.num_slots = 0;
 
kfree(port->cached_edid);
if (port->connector)
(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
1023,20 → 1002,19
 
static void build_mst_prop_path(struct drm_dp_mst_port *port,
struct drm_dp_mst_branch *mstb,
char *proppath,
size_t proppath_size)
char *proppath)
{
int i;
char temp[8];
snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
for (i = 0; i < (mstb->lct - 1); i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = mstb->rad[i / 2] >> shift;
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
snprintf(temp, 8, "-%d", port_num);
strncat(proppath, temp, 255);
}
snprintf(temp, sizeof(temp), "-%d", port->port_num);
strlcat(proppath, temp, proppath_size);
snprintf(temp, 8, "-%d", port->port_num);
strncat(proppath, temp, 255);
}
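/*
 * Editor's note: a worked example of the path built above, with illustrative
 * values. For conn_base_id = 30, a branch at lct = 2 reached through port 1
 * (rad[0] = 0x10) and a port_num of 8, the resulting connector property path
 * is "mst:30-1-8".
 */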
 
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1107,13 → 1085,9
 
if (created && !port->input) {
char proppath[255];
build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
build_mst_prop_path(port, mstb, proppath);
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
 
if (port->port_num >= 8) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
}
}
 
/* put reference to this port */
drm_dp_put_port(port);
1596,7 → 1570,7
}
 
drm_dp_dpcd_write_payload(mgr, id, payload);
payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
payload->payload_state = 0;
return 0;
}
 
1623,7 → 1597,7
*/
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
int i, j;
int i;
int cur_slots = 1;
struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
1640,46 → 1614,26
port = NULL;
req_payload.num_slots = 0;
}
 
if (mgr->payloads[i].start_slot != req_payload.start_slot) {
mgr->payloads[i].start_slot = req_payload.start_slot;
}
/* work out what is required to happen with this payload */
if (mgr->payloads[i].num_slots != req_payload.num_slots) {
if (mgr->payloads[i].start_slot != req_payload.start_slot ||
mgr->payloads[i].num_slots != req_payload.num_slots) {
 
/* need to push an update for this payload */
if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
req_payload.payload_state = mgr->payloads[i].payload_state;
mgr->payloads[i].start_slot = 0;
}
} else
req_payload.payload_state = 0;
 
mgr->payloads[i].start_slot = req_payload.start_slot;
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
}
 
for (i = 0; i < mgr->max_payloads; i++) {
if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
DRM_DEBUG_KMS("removing payload %d\n", i);
for (j = i; j < mgr->max_payloads - 1; j++) {
memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
set_bit(j + 1, &mgr->payload_mask);
} else {
clear_bit(j + 1, &mgr->payload_mask);
}
}
memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
clear_bit(mgr->max_payloads, &mgr->payload_mask);
 
}
}
mutex_unlock(&mgr->payload_lock);
 
return 0;
1710,9 → 1664,9
 
DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
}
if (ret) {
mutex_unlock(&mgr->payload_lock);
1815,27 → 1769,17
return 0;
}
 
static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
int dp_link_count,
int *out)
static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
{
switch (dp_link_bw) {
default:
DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
dp_link_bw, dp_link_count);
return false;
 
case DP_LINK_BW_1_62:
*out = 3 * dp_link_count;
break;
return 3 * dp_link_count;
case DP_LINK_BW_2_7:
*out = 5 * dp_link_count;
break;
return 5 * dp_link_count;
case DP_LINK_BW_5_4:
*out = 10 * dp_link_count;
break;
return 10 * dp_link_count;
}
return true;
return 0;
}
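/*
 * Editor's note: a worked instance of the divider above, with illustrative
 * DPCD values. A 2.7 Gbps link (DP_LINK_BW_2_7) trained at 4 lanes gives
 * pbn_div = 5 * 4 = 20, so with total_pbn = 2560 the manager ends up with
 * DIV_ROUND_UP(2560, 20) = 128 time slots.
 */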
 
/**
1867,13 → 1811,7
goto out_unlock;
}
 
if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
&mgr->pbn_div)) {
ret = -EINVAL;
goto out_unlock;
}
 
mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
mgr->total_pbn = 2560;
mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
mgr->avail_slots = mgr->total_slots;
1930,7 → 1868,6
memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
mgr->vcpi_mask = 0;
}
 
out_unlock:
2141,7 → 2078,6
* drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
* @mgr: manager to notify irq for.
* @esi: 4 bytes from SINK_COUNT_ESI
* @handled: whether the hpd interrupt was consumed or not
*
* This should be called from the driver when it detects a short IRQ,
* along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2183,8 → 2119,7
* This returns the current connection state for a port. It validates the
* port pointer still exists so the caller doesn't require a reference
*/
enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
enum drm_connector_status status = connector_status_disconnected;
 
2203,10 → 2138,6
 
case DP_PEER_DEVICE_SST_SINK:
status = connector_status_connected;
/* for logical ports - cache the EDID */
if (port->port_num >= 8 && !port->cached_edid) {
port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
}
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
2238,12 → 2169,7
if (!port)
return NULL;
 
if (port->cached_edid)
edid = drm_edid_duplicate(port->cached_edid);
else
edid = drm_get_edid(connector, &port->aux.ddc);
 
drm_mode_connector_set_tile_property(connector);
drm_dp_put_port(port);
return edid;
}
/drivers/video/drm/drm_edid.c
34,7 → 34,6
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_displayid.h>
 
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
633,27 → 632,27
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 6 - 720(1440)x480i@60Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 7 - 720(1440)x480i@60Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 8 - 720(1440)x240@60Hz */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 9 - 720(1440)x240@60Hz */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
715,27 → 714,27
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 21 - 720(1440)x576i@50Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 22 - 720(1440)x576i@50Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 23 - 720(1440)x288@50Hz */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 24 - 720(1440)x288@50Hz */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
838,17 → 837,17
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 44 - 720(1440)x576i@100Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 45 - 720(1440)x576i@100Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
DRM_MODE_FLAG_DBLCLK),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
871,15 → 870,15
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 50 - 720(1440)x480i@120Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 51 - 720(1440)x480i@120Hz */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
893,15 → 892,15
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 54 - 720(1440)x576i@200Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 55 - 720(1440)x576i@200Hz */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
915,15 → 914,15
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 58 - 720(1440)x480i@240 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
/* 58 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 59 - 720(1440)x480i@240 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
/* 59 - 1440x480i@240 */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
.vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
1015,27 → 1014,6
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
 
static void drm_get_displayid(struct drm_connector *connector,
struct edid *edid);
 
static int drm_edid_block_checksum(const u8 *raw_edid)
{
int i;
u8 csum = 0;
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
 
return csum;
}
 
static bool drm_edid_is_zero(const u8 *in_edid, int length)
{
if (memchr_inv(in_edid, 0, length))
return false;
 
return true;
}
 
/**
* drm_edid_block_valid - Sanity check the EDID block (base or extension)
* @raw_edid: pointer to raw EDID block
1049,7 → 1027,8
*/
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
u8 csum;
int i;
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
 
if (WARN_ON(!raw_edid))
1069,7 → 1048,8
}
}
 
csum = drm_edid_block_checksum(raw_edid);
for (i = 0; i < EDID_LENGTH; i++)
csum += raw_edid[i];
if (csum) {
if (print_bad_edid) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
1100,14 → 1080,10
 
bad:
if (print_bad_edid) {
if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) {
printk(KERN_ERR "EDID block is all zeroes\n");
} else {
printk(KERN_ERR "Raw EDID:\n");
print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
raw_edid, EDID_LENGTH, false);
}
}
return false;
}
EXPORT_SYMBOL(drm_edid_block_valid);
1139,7 → 1115,7
#define DDC_SEGMENT_ADDR 0x30
/**
* drm_do_probe_ddc_edid() - get EDID information via I2C
* @data: I2C device adapter
* @adapter: I2C device adapter
* @buf: EDID data buffer to be filled
* @block: 128 byte EDID block to start fetching from
* @len: EDID data buffer length to fetch
1149,9 → 1125,9
* Return: 0 on success or -1 on failure.
*/
static int
drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
struct i2c_adapter *adapter = data;
unsigned char start = block * EDID_LENGTH;
unsigned char segment = block >> 1;
unsigned char xfers = segment ? 3 : 2;
1200,27 → 1176,17
return ret == xfers ? 0 : -1;
}
 
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
* @get_edid_block: EDID block read function
* @data: private data passed to the block read function
*
* When the I2C adapter connected to the DDC bus is hidden behind a device that
* exposes a different interface to read EDID blocks this function can be used
* to get EDID data using a custom block read function.
*
* As in the general case the DDC bus is accessible by the kernel at the I2C
* level, drivers must make all reasonable efforts to expose it as an I2C
* adapter and use drm_get_edid() instead of abusing this function.
*
* Return: Pointer to valid EDID or NULL if we couldn't find any.
*/
struct edid *drm_do_get_edid(struct drm_connector *connector,
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
size_t len),
void *data)
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
if (memchr_inv(in_edid, 0, length))
return false;
 
return true;
}
 
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
1230,7 → 1196,7
 
/* base block fetch */
for (i = 0; i < 4; i++) {
if (get_edid_block(data, block, 0, EDID_LENGTH))
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block, 0, print_bad_edid))
break;
1244,7 → 1210,7
 
/* if there's no extensions, we're done */
if (block[0x7e] == 0)
return (struct edid *)block;
return block;
 
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
1253,7 → 1219,7
 
for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
if (get_edid_block(data,
if (drm_do_probe_ddc_edid(adapter,
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
1281,7 → 1247,7
block = new;
}
 
return (struct edid *)block;
return block;
 
carp:
if (print_bad_edid) {
1294,7 → 1260,6
kfree(block);
return NULL;
}
EXPORT_SYMBOL_GPL(drm_do_get_edid);
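/*
 * Editor's note: a hedged sketch of a custom block-read callback for
 * drm_do_get_edid(), for the case in the kernel-doc above where the DDC bus
 * is hidden behind another device. example_device and example_read_edid()
 * are hypothetical names standing in for the driver's private transport.
 */
static int example_get_edid_block(void *data, u8 *buf, unsigned int block,
				  size_t len)
{
	struct example_device *edev = data;	/* hypothetical private data */

	/* fetch "len" bytes of EDID block "block" over the private channel */
	return example_read_edid(edev, buf, block * EDID_LENGTH, len);
}

/* usage: edid = drm_do_get_edid(connector, example_get_edid_block, edev); */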
 
/**
* drm_probe_ddc() - probe DDC presence
1324,14 → 1289,11
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
struct edid *edid = NULL;
 
if (!drm_probe_ddc(adapter))
return NULL;
if (drm_probe_ddc(adapter))
edid = (struct edid *)drm_do_get_edid(connector, adapter);
 
edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
if (edid)
drm_get_displayid(connector, edid);
return edid;
}
EXPORT_SYMBOL(drm_get_edid);
2141,8 → 2103,7
add_inferred_modes(struct drm_connector *connector, struct edid *edid)
{
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
connector, edid, 0, 0, 0
};
 
if (version_greater(edid, 1, 0))
2208,8 → 2169,7
((edid->established_timings.mfg_rsvd & 0x80) << 9);
int i, modes = 0;
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
connector, edid, 0, 0, 0
};
 
for (i = 0; i <= EDID_EST_TIMINGS; i++) {
2267,8 → 2227,7
{
int i, modes = 0;
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
connector, edid, 0, 0, 0
};
 
for (i = 0; i < EDID_STD_TIMINGS; i++) {
2354,8 → 2313,7
add_cvt_modes(struct drm_connector *connector, struct edid *edid)
{
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
connector, edid, 0, 0, 0
};
 
if (version_greater(edid, 1, 2))
2399,10 → 2357,11
u32 quirks)
{
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
.preferred = 1,
.quirks = quirks,
connector,
edid,
1,
quirks,
0
};
 
if (closure.preferred && !version_greater(edid, 1, 3))
2427,7 → 2386,7
/*
* Search EDID for CEA extension block.
*/
static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
static u8 *drm_find_cea_extension(struct edid *edid)
{
u8 *edid_ext = NULL;
int i;
2439,7 → 2398,7
/* Find CEA extension */
for (i = 0; i < edid->extensions; i++) {
edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
if (edid_ext[0] == ext_id)
if (edid_ext[0] == CEA_EXT)
break;
}
 
2449,16 → 2408,6
return edid_ext;
}
 
static u8 *drm_find_cea_extension(struct edid *edid)
{
return drm_find_edid_extension(edid, CEA_EXT);
}
 
static u8 *drm_find_displayid_extension(struct edid *edid)
{
return drm_find_edid_extension(edid, DISPLAYID_EXT);
}
 
/*
* Calculate the alternate clock for the CEA mode
* (60Hz vs. 59.94Hz etc.)
3176,12 → 3125,9
}
}
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
 
eld[DRM_ELD_BASELINE_ELD_LEN] =
DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
 
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
drm_eld_size(eld), sad_count);
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
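/*
 * Editor's note: a worked instance of the baseline ELD length stored in
 * eld[2] above, with illustrative numbers. For a 9-byte monitor name
 * (mnl = 9) and a single SAD, (20 + 9 + 1 * 3 + 3) / 4 = 8 dwords; the +3
 * only makes the integer division round up.
 */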
 
3487,10 → 3433,10
/**
* drm_assign_hdmi_deep_color_info - detect whether monitor supports
* hdmi deep color modes and update drm_display_info if so.
*
* @edid: monitor EDID information
* @info: Updated with maximum supported deep color bpc and color format
* if deep color supported.
* @connector: DRM connector, used only for debug output
*
* Parse the CEA extension according to CEA-861-B.
* Return true if HDMI deep color supported, false if not or unknown.
3919,123 → 3865,3
return 0;
}
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
 
static int drm_parse_display_id(struct drm_connector *connector,
u8 *displayid, int length,
bool is_edid_extension)
{
/* if this is an EDID extension the first byte will be 0x70 */
int idx = 0;
struct displayid_hdr *base;
struct displayid_block *block;
u8 csum = 0;
int i;
 
if (is_edid_extension)
idx = 1;
 
base = (struct displayid_hdr *)&displayid[idx];
 
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
 
if (base->bytes + 5 > length - idx)
return -EINVAL;
 
for (i = idx; i <= base->bytes + 5; i++) {
csum += displayid[i];
}
if (csum) {
DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
return -EINVAL;
}
 
block = (struct displayid_block *)&displayid[idx + 4];
DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
block->tag, block->rev, block->num_bytes);
 
switch (block->tag) {
case DATA_BLOCK_TILED_DISPLAY: {
struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
 
u16 w, h;
u8 tile_v_loc, tile_h_loc;
u8 num_v_tile, num_h_tile;
struct drm_tile_group *tg;
 
w = tile->tile_size[0] | tile->tile_size[1] << 8;
h = tile->tile_size[2] | tile->tile_size[3] << 8;
 
num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
 
connector->has_tile = true;
if (tile->tile_cap & 0x80)
connector->tile_is_single_monitor = true;
 
connector->num_h_tile = num_h_tile + 1;
connector->num_v_tile = num_v_tile + 1;
connector->tile_h_loc = tile_h_loc;
connector->tile_v_loc = tile_v_loc;
connector->tile_h_size = w + 1;
connector->tile_v_size = h + 1;
 
DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
 
tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
if (!tg) {
tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
}
if (!tg)
return -ENOMEM;
 
if (connector->tile_group != tg) {
/* if we haven't got a pointer, take the
reference and drop the ref to the old tile group */
if (connector->tile_group) {
drm_mode_put_tile_group(connector->dev, connector->tile_group);
}
connector->tile_group = tg;
} else
/* if same tile group, then release the ref we just took. */
drm_mode_put_tile_group(connector->dev, tg);
}
break;
default:
printk("unknown displayid tag %d\n", block->tag);
break;
}
return 0;
}
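/*
 * Editor's note: a worked decode of the tile topology bytes above, with
 * illustrative values for a 2x1 array of 1920x2160 tiles. Assume
 * tile_size[] = {0x7f, 0x07, 0x6f, 0x08} and topo[] = {0x10, 0x00, 0x00}:
 * then w = 0x077f and h = 0x086f, so each tile is 1920x2160 after the +1
 * adjustment; num_h_tile = 1 (two horizontal tiles), num_v_tile = 0 (one
 * row), and this tile sits at location (0, 0).
 */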
 
static void drm_get_displayid(struct drm_connector *connector,
struct edid *edid)
{
void *displayid = NULL;
int ret;
connector->has_tile = false;
displayid = drm_find_displayid_extension(edid);
if (!displayid) {
/* drop reference to any tile group we had */
goto out_drop_ref;
}
 
ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
if (ret < 0)
goto out_drop_ref;
if (!connector->has_tile)
goto out_drop_ref;
return;
out_drop_ref:
if (connector->tile_group) {
drm_mode_put_tile_group(connector->dev, connector->tile_group);
connector->tile_group = NULL;
}
return;
}
/drivers/video/drm/drm_fb_helper.c
126,7 → 126,7
 
WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), GFP_KERNEL);
temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL);
if (!temp)
return -ENOMEM;
 
170,7 → 170,6
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
 
static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
{
uint16_t *r_base, *g_base, *b_base;
211,17 → 210,10
 
drm_warn_on_modeset_not_all_locked(dev);
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
list_for_each_entry(plane, &dev->mode_config.plane_list, head)
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
 
if (dev->mode_config.rotation_property) {
drm_mode_plane_set_obj_prop(plane,
dev->mode_config.rotation_property,
BIT(DRM_ROTATE_0));
}
}
 
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
struct drm_crtc *crtc = mode_set->crtc;
251,8 → 243,6
{
struct drm_device *dev = fb_helper->dev;
bool ret;
bool do_delayed = false;
 
drm_modeset_lock_all(dev);
ret = restore_fbdev_mode(fb_helper);
drm_modeset_unlock_all(dev);
716,6 → 706,10
 
drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
 
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
drm_fb_helper_hotplug_event(fb_helper);
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
790,7 → 784,7
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
 
cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
cmdline_mode = &fb_helper_conn->cmdline_mode;
 
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
819,21 → 813,19
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
int x, y;
desired_mode = fb_helper->crtc_info[i].desired_mode;
x = fb_helper->crtc_info[i].x;
y = fb_helper->crtc_info[i].y;
 
if (desired_mode) {
if (gamma_size == 0)
gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
if (desired_mode->hdisplay + x < sizes.fb_width)
sizes.fb_width = desired_mode->hdisplay + x;
if (desired_mode->vdisplay + y < sizes.fb_height)
sizes.fb_height = desired_mode->vdisplay + y;
if (desired_mode->hdisplay + x > sizes.surface_width)
sizes.surface_width = desired_mode->hdisplay + x;
if (desired_mode->vdisplay + y > sizes.surface_height)
sizes.surface_height = desired_mode->vdisplay + y;
if (desired_mode->hdisplay < sizes.fb_width)
sizes.fb_width = desired_mode->hdisplay;
if (desired_mode->vdisplay < sizes.fb_height)
sizes.fb_height = desired_mode->vdisplay;
if (desired_mode->hdisplay > sizes.surface_width)
sizes.surface_width = desired_mode->hdisplay;
if (desired_mode->vdisplay > sizes.surface_height)
sizes.surface_height = desired_mode->vdisplay;
crtc_count++;
}
}
866,8 → 858,6
 
 
info->var.pixclock = 0;
dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
 
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
 
1027,7 → 1017,9
 
static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
{
return fb_connector->connector->cmdline_mode.specified;
struct drm_cmdline_mode *cmdline_mode;
cmdline_mode = &fb_connector->cmdline_mode;
return cmdline_mode->specified;
}
 
struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
1039,6 → 1031,7
 
return NULL;
 
cmdline_mode = &fb_helper_conn->cmdline_mode;
if (cmdline_mode->specified == false)
return mode;
 
1122,7 → 1115,6
 
static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
int count, i, j;
1194,80 → 1186,19
return false;
}
 
static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
struct drm_fb_offset *offsets,
int idx,
int h_idx, int v_idx)
{
struct drm_fb_helper_connector *fb_helper_conn;
int i;
int hoffset = 0, voffset = 0;
 
for (i = 0; i < fb_helper->connector_count; i++) {
fb_helper_conn = fb_helper->connector_info[i];
if (!fb_helper_conn->connector->has_tile)
continue;
 
if (!modes[i] && (h_idx || v_idx)) {
DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
fb_helper_conn->connector->base.id);
continue;
}
if (fb_helper_conn->connector->tile_h_loc < h_idx)
hoffset += modes[i]->hdisplay;
 
if (fb_helper_conn->connector->tile_v_loc < v_idx)
voffset += modes[i]->vdisplay;
}
offsets[idx].x = hoffset;
offsets[idx].y = voffset;
DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
return 0;
}
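/*
 * Editor's note: a worked example of the offset accumulation above for a
 * hypothetical 2x1 tiled monitor built from two 1920x2160 tiles. For the
 * right-hand tile (tile_h_loc = 1, tile_v_loc = 0) the loop adds the
 * hdisplay of the tile to its left, yielding offsets[idx] = (1920, 0);
 * the left-hand tile keeps (0, 0).
 */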
 
static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_fb_helper_connector *fb_helper_conn;
int i;
uint64_t conn_configured = 0, mask;
int tile_pass = 0;
mask = (1 << fb_helper->connector_count) - 1;
retry:
 
for (i = 0; i < fb_helper->connector_count; i++) {
fb_helper_conn = fb_helper->connector_info[i];
 
if (conn_configured & (1 << i))
if (enabled[i] == false)
continue;
 
if (enabled[i] == false) {
conn_configured |= (1 << i);
continue;
}
 
/* first pass over all the untiled connectors */
if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
continue;
 
if (tile_pass == 1) {
if (fb_helper_conn->connector->tile_h_loc != 0 ||
fb_helper_conn->connector->tile_v_loc != 0)
continue;
 
} else {
if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 &&
fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
/* if this tile_pass doesn't cover any of the tiles - keep going */
continue;
 
/* find the tile offsets for this pass - need
to find all tiles left and above */
drm_get_tile_offsets(fb_helper, modes, offsets,
i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
}
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
fb_helper_conn->connector->base.id);
 
1274,8 → 1205,8
/* go for command line mode first */
modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
fb_helper_conn->connector->base.id);
modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
}
/* No preferred modes, pick one off the list */
1285,13 → 1216,7
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
conn_configured |= (1 << i);
}
 
if ((conn_configured & mask) != mask) {
tile_pass++;
goto retry;
}
return true;
}
 
1380,7 → 1305,6
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_fb_offset *offsets;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
1395,11 → 1319,9
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_display_mode *), GFP_KERNEL);
offsets = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_fb_offset), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
if (!crtcs || !modes || !enabled || !offsets) {
if (!crtcs || !modes || !enabled) {
DRM_ERROR("Memory allocation failed\n");
goto out;
}
1409,16 → 1331,14
 
if (!(fb_helper->funcs->initial_config &&
fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
offsets,
enabled, width, height))) {
memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
 
if (!drm_target_cloned(fb_helper, modes, offsets,
enabled, width, height) &&
!drm_target_preferred(fb_helper, modes, offsets,
enabled, width, height))
if (!drm_target_cloned(fb_helper,
modes, enabled, width, height) &&
!drm_target_preferred(fb_helper,
modes, enabled, width, height))
DRM_ERROR("Unable to find initial modes\n");
 
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
1438,15 → 1358,12
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
struct drm_fb_offset *offset = &offsets[i];
modeset = &fb_crtc->mode_set;
 
if (mode && fb_crtc) {
DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
mode->name, fb_crtc->mode_set.crtc->base.id);
fb_crtc->desired_mode = mode;
fb_crtc->x = offset->x;
fb_crtc->y = offset->y;
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
1453,8 → 1370,6
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
modeset->fb = fb_helper->fb;
modeset->x = offset->x;
modeset->y = offset->y;
}
}
 
1463,6 → 1378,7
modeset = &fb_helper->crtc_info[i].mode_set;
if (modeset->num_connectors == 0) {
BUG_ON(modeset->fb);
BUG_ON(modeset->num_connectors);
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = NULL;
1471,7 → 1387,6
out:
kfree(crtcs);
kfree(modes);
kfree(offsets);
kfree(enabled);
}
 
1501,6 → 1416,8
struct drm_device *dev = fb_helper->dev;
int count = 0;
 
// drm_fb_helper_parse_command_line(fb_helper);
 
mutex_lock(&dev->mode_config.mutex);
count = drm_fb_helper_probe_connector_modes(fb_helper,
dev->mode_config.max_width,
/drivers/video/drm/drm_gem.c
35,8 → 35,6
#include <linux/err.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"
 
/** @file drm_gem.c
*
145,7 → 143,7
EXPORT_SYMBOL(drm_gem_object_init);
 
/**
* drm_gem_private_object_init - initialize an allocated private GEM object
* drm_gem_object_init - initialize an allocated private GEM object
* @dev: drm_device the object should be initialized for
* @obj: drm_gem_object to initialize
* @size: object size
170,7 → 168,7
EXPORT_SYMBOL(drm_gem_private_object_init);
 
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* drm_gem_object_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
*
* Called after the last handle to the object has been closed
280,7 → 278,7
* drm_gem_handle_create_tail - internal functions to create a handle
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
* @handlep: pointer to return the created handle to the caller
* @handlep: pionter to return the created handle to the caller
*
* This expects the dev->object_name_lock to be held already and will drop it
* before returning. Used to avoid races in establishing new handles when
333,7 → 331,7
}
 
/**
* drm_gem_handle_create - create a gem handle for an object
* gem_handle_create - create a gem handle for an object
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
* @handlep: pointer to return the created handle to the caller
342,7 → 340,8
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
int drm_gem_handle_create(struct drm_file *file_priv,
int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
552,7 → 551,7
struct drm_gem_close *args = data;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_GEM))
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
 
ret = drm_gem_handle_delete(file_priv, args->handle);
579,7 → 578,7
struct drm_gem_object *obj;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_GEM))
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
 
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
632,7 → 631,7
int ret;
u32 handle;
 
if (!drm_core_check_feature(dev, DRIVER_GEM))
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
 
mutex_lock(&dev->object_name_lock);
/drivers/video/drm/drm_irq.c
33,13 → 33,13
*/
 
#include <drm/drmP.h>
#include <asm/div64.h>
//#include "drm_trace.h"
#include "drm_internal.h"
 
//#include <linux/interrupt.h> /* For task queue support */
#include <linux/slab.h>
 
#include <linux/vgaarb.h>
//#include <linux/vgaarb.h>
#include <linux/export.h>
 
/* Access macro for slots in vblank timestamp ringbuffer. */
56,12 → 56,6
*/
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
struct timeval *tvblank, unsigned flags);
 
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
 
/*
* Clear vblank timestamp buffer for a crtc.
*/
92,13 → 86,11
goto err;
 
for (i = 0; i < num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
 
vblank->dev = dev;
vblank->crtc = i;
init_waitqueue_head(&vblank->queue);
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
dev->vblank[i].dev = dev;
dev->vblank[i].crtc = i;
init_waitqueue_head(&dev->vblank[i].queue);
setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
(unsigned long)&dev->vblank[i]);
}
 
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
114,7 → 106,7
return 0;
 
err:
dev->num_crtcs = 0;
drm_vblank_cleanup(dev);
return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
186,7 → 178,7
dev->irq = irq;
}
 
u16 cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
cmd&= ~(1<<10);
PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);
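Offset 4 in PCI config space is the command register, and bit 10 is its Interrupt Disable bit, so the two lines above re-enable legacy INTx delivery before the IRQ handler is installed. The same write, spelled out with symbolic names borrowed from Linux's pci_regs.h for clarity:

#define PCI_COMMAND              0x04        /* config-space command register */
#define PCI_COMMAND_INTX_DISABLE (1 << 10)   /* 1 = suppress INTx assertion  */

u16 cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, PCI_COMMAND);
cmd &= ~PCI_COMMAND_INTX_DISABLE;            /* allow legacy interrupts */
PciWrite16(dev->pdev->busnr, dev->pdev->devfn, PCI_COMMAND, cmd);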
 
353,159 → 345,6
EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
 
/**
* drm_vblank_get - get a reference count on vblank events
* @dev: DRM device
* @crtc: which CRTC to own
*
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
* This is the legacy version of drm_crtc_vblank_get().
*
* Returns:
* Zero on success, nonzero on failure.
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
int ret = 0;
#if 0
 
if (WARN_ON(crtc >= dev->num_crtcs))
return -EINVAL;
 
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &vblank->refcount) == 1) {
ret = drm_vblank_enable(dev, crtc);
} else {
if (!vblank->enabled) {
atomic_dec(&vblank->refcount);
ret = -EINVAL;
}
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
#endif
return ret;
}
EXPORT_SYMBOL(drm_vblank_get);
 
/**
* drm_crtc_vblank_get - get a reference count on vblank events
* @crtc: which CRTC to own
*
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
* This is the native kms version of drm_vblank_get().
*
* Returns:
* Zero on success, nonzero on failure.
*/
int drm_crtc_vblank_get(struct drm_crtc *crtc)
{
return drm_vblank_get(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_get);
 
/**
* drm_vblank_put - give up ownership of vblank events
* @dev: DRM device
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*
* This is the legacy version of drm_crtc_vblank_put().
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
#if 0
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
 
if (WARN_ON(atomic_read(&vblank->refcount) == 0))
return;
 
if (WARN_ON(crtc >= dev->num_crtcs))
return;
 
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&vblank->refcount)) {
if (drm_vblank_offdelay == 0)
return;
else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
vblank_disable_fn((unsigned long)vblank);
else
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
#endif
}
EXPORT_SYMBOL(drm_vblank_put);
 
/**
* drm_crtc_vblank_put - give up ownership of vblank events
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
*
* This is the native kms version of drm_vblank_put().
*/
void drm_crtc_vblank_put(struct drm_crtc *crtc)
{
drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_put);
 
/**
* drm_wait_one_vblank - wait for one vblank
* @dev: DRM device
* @crtc: crtc index
*
* This waits for one vblank to pass on @crtc, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
* due to lack of driver support or because the crtc is off.
*/
void drm_wait_one_vblank(struct drm_device *dev, int crtc)
{
#if 0
int ret;
u32 last;
 
ret = drm_vblank_get(dev, crtc);
if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
return;
 
last = drm_vblank_count(dev, crtc);
 
ret = wait_event_timeout(dev->vblank[crtc].queue,
last != drm_vblank_count(dev, crtc),
msecs_to_jiffies(100));
 
WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
 
drm_vblank_put(dev, crtc);
#endif
}
EXPORT_SYMBOL(drm_wait_one_vblank);
 
/**
* drm_crtc_wait_one_vblank - wait for one vblank
* @crtc: DRM crtc
*
* This waits for one vblank to pass on @crtc, using the irq driver interfaces.
* It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
* due to lack of driver support or because the crtc is off.
*/
void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
{
drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
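Taken together, the kerneldocs above describe the canonical get/wait/put discipline; drm_wait_one_vblank() itself is the reference implementation. A hedged sketch of how driver code typically holds a vblank reference across a wait (example_wait_for_flip is a hypothetical caller):

/* Sketch: keep the vblank irq enabled while waiting out one frame. */
static void example_wait_for_flip(struct drm_crtc *crtc)
{
	if (drm_crtc_vblank_get(crtc))      /* 0 on success; enables irq if needed */
		return;                     /* vblank unavailable on this crtc */

	drm_crtc_wait_one_vblank(crtc);     /* blocks until the counter advances */

	drm_crtc_vblank_put(crtc);          /* last ref schedules irq disable */
}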
 
/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
521,7 → 360,6
*/
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
557,7 → 395,7
*
* This function restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
* drm_vblank_off() can be unbalanced and so can also be unconditionally called
* drm_vblank_off() can be unbalanced and so can also be unconditionaly called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the legacy version of drm_crtc_vblank_on().
564,7 → 402,6
*/
void drm_vblank_on(struct drm_device *dev, int crtc)
{
struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
 
}
576,7 → 413,7
*
* This function restores the vblank interrupt state captured with
* drm_vblank_off() again. Note that calls to drm_vblank_on() and
* drm_vblank_off() can be unbalanced and so can also be unconditionally called
* drm_vblank_off() can be unbalanced and so can also be unconditionaly called
* in driver load code to reflect the current hardware state of the crtc.
*
* This is the native kms version of drm_vblank_on().
616,10 → 453,6
/* vblank is not initialized (IRQ not installed ?) */
if (!dev->num_crtcs)
return;
 
if (WARN_ON(crtc >= dev->num_crtcs))
return;
 
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
627,10 → 460,10
* to avoid corrupting the count if multiple, mismatch calls occur),
* so that interrupts remain enabled in the interim.
*/
if (!vblank->inmodeset) {
vblank->inmodeset = 0x1;
if (!dev->vblank[crtc].inmodeset) {
dev->vblank[crtc].inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
vblank->inmodeset |= 0x2;
dev->vblank[crtc].inmodeset |= 0x2;
}
#endif
}
653,18 → 486,16
if (!dev->num_crtcs)
return;
 
if (vblank->inmodeset) {
if (dev->vblank[crtc].inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
if (vblank->inmodeset & 0x2)
if (dev->vblank[crtc].inmodeset & 0x2)
drm_vblank_put(dev, crtc);
 
vblank->inmodeset = 0;
dev->vblank[crtc].inmodeset = 0;
}
#endif
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
 
 
/drivers/video/drm/drm_modes.c
912,7 → 912,7
*
* This function is a helper which can be used to validate modes against size
* limitations of the DRM device/connector. If a mode is too big its status
* member is updated with the appropriate validation failure code. The list
* memeber is updated with the appropriate validation failure code. The list
* itself is not changed.
*/
void drm_mode_validate_size(struct drm_device *dev,
/drivers/video/drm/drm_modeset_lock.c
35,7 → 35,7
* of extra utility/tracking out of our acquire-ctx. This is provided
* by drm_modeset_lock / drm_modeset_acquire_ctx.
*
* For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
* For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
*
* The basic usage pattern is to:
*
57,230 → 57,6
 
 
/**
* __drm_modeset_lock_all - internal helper to grab all modeset locks
* @dev: DRM device
* @trylock: trylock mode for atomic contexts
*
* This is a special version of drm_modeset_lock_all() which can also be used in
* atomic contexts. Then @trylock must be set to true.
*
* Returns:
* 0 on success or negative error code on failure.
*/
int __drm_modeset_lock_all(struct drm_device *dev,
bool trylock)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx;
int ret;
 
ctx = kzalloc(sizeof(*ctx),
trylock ? GFP_ATOMIC : GFP_KERNEL);
if (!ctx)
return -ENOMEM;
 
if (trylock) {
if (!mutex_trylock(&config->mutex))
return -EBUSY;
} else {
mutex_lock(&config->mutex);
}
 
drm_modeset_acquire_init(ctx, 0);
ctx->trylock_only = trylock;
 
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
goto fail;
ret = drm_modeset_lock_all_crtcs(dev, ctx);
if (ret)
goto fail;
 
WARN_ON(config->acquire_ctx);
 
/* now we hold the locks, so now that it is safe, stash the
* ctx for drm_modeset_unlock_all():
*/
config->acquire_ctx = ctx;
 
drm_warn_on_modeset_not_all_locked(dev);
 
return 0;
 
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
 
return ret;
}
EXPORT_SYMBOL(__drm_modeset_lock_all);
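__drm_modeset_lock_all() above shows the retry/backoff dance that every drm_modeset_lock() caller is expected to perform by hand. A minimal sketch of the same pattern for a single lock, assuming crtc points at some live CRTC:

/* Sketch: standard -EDEADLK backoff loop around a ww-mutex-backed lock. */
struct drm_modeset_acquire_ctx ctx;
int ret;

drm_modeset_acquire_init(&ctx, 0);
retry:
ret = drm_modeset_lock(&crtc->mutex, &ctx);
if (ret == -EDEADLK) {
	drm_modeset_backoff(&ctx);          /* drop held locks, wait for contender */
	goto retry;
}
if (!ret) {
	/* ... modeset work under the lock ... */
	drm_modeset_drop_locks(&ctx);
}
drm_modeset_acquire_fini(&ctx);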
 
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented. Locks must be dropped with
* drm_modeset_unlock_all.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
WARN_ON(__drm_modeset_lock_all(dev, false) != 0);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
 
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
*
* This function drops all modeset locks taken by drm_modeset_lock_all().
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
 
if (WARN_ON(!ctx))
return;
 
config->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
 
kfree(ctx);
 
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
 
/**
* drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
* @crtc: DRM CRTC
* @plane: DRM plane to be updated on @crtc
*
* This function locks the given crtc and plane (which should be either the
* primary or cursor plane) using a hidden acquire context. This is necessary so
* that drivers internally using the atomic interfaces can grab further locks
* with the lock acquire context.
*
* Note that @plane can be NULL, e.g. when cursor support hasn't yet been
* converted to universal planes.
*/
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
struct drm_plane *plane)
{
struct drm_modeset_acquire_ctx *ctx;
int ret;
 
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (WARN_ON(!ctx))
return;
 
drm_modeset_acquire_init(ctx, 0);
 
retry:
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail;
 
if (plane) {
ret = drm_modeset_lock(&plane->mutex, ctx);
if (ret)
goto fail;
 
if (plane->crtc) {
ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
if (ret)
goto fail;
}
}
 
WARN_ON(crtc->acquire_ctx);
 
/* now we hold the locks, so now that it is safe, stash the
* ctx for drm_modeset_unlock_crtc():
*/
crtc->acquire_ctx = ctx;
 
return;
 
fail:
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
goto retry;
}
}
EXPORT_SYMBOL(drm_modeset_lock_crtc);
 
/**
* drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
* @crtc: drm crtc
*
* Legacy ioctl operations like cursor updates or page flips only have per-crtc
* locking, and store the acquire ctx in the corresponding crtc. All other
* legacy operations take all locks and use a global acquire context. This
* function grabs the right one.
*/
struct drm_modeset_acquire_ctx *
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
{
if (crtc->acquire_ctx)
return crtc->acquire_ctx;
 
WARN_ON(!crtc->dev->mode_config.acquire_ctx);
 
return crtc->dev->mode_config.acquire_ctx;
}
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
 
/**
* drm_modeset_unlock_crtc - drop crtc lock
* @crtc: drm crtc
*
* This drops the crtc lock acquire with drm_modeset_lock_crtc() and all other
* locks acquired through the hidden context.
*/
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
{
struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
 
if (WARN_ON(!ctx))
return;
 
crtc->acquire_ctx = NULL;
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
 
kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
 
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
*
* Useful as a debug assert.
*/
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
struct drm_crtc *crtc;
 
/* Locking is currently fubar in the panic handler. */
// if (oops_in_progress)
// return;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
 
/**
* drm_modeset_acquire_init - initialize acquire context
* @ctx: the acquire context
* @flags: reserved for future use
332,12 → 108,7
 
WARN_ON(ctx->contended);
 
if (ctx->trylock_only) {
if (!ww_mutex_trylock(&lock->mutex))
return -EBUSY;
else
return 0;
} else if (interruptible && slow) {
if (interruptible && slow) {
ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
} else if (interruptible) {
ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
455,14 → 226,15
}
EXPORT_SYMBOL(drm_modeset_unlock);
 
/* In some legacy codepaths it's convenient to just grab all the crtc and plane
* related locks. */
/* Temporary.. until we have sufficiently fine grained locking, there
* are a couple scenarios where it is convenient to grab all crtc locks.
* It is planned to remove this:
*/
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_config *config = &dev->mode_config;
struct drm_crtc *crtc;
struct drm_plane *plane;
int ret = 0;
 
list_for_each_entry(crtc, &config->crtc_list, head) {
471,12 → 243,6
return ret;
}
 
list_for_each_entry(plane, &config->plane_list, head) {
ret = drm_modeset_lock(&plane->mutex, ctx);
if (ret)
return ret;
}
 
return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
/drivers/video/drm/drm_pci.c
22,12 → 22,11
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/pci.h>
#include <linux/slab.h>
//#include <linux/pci.h>
//#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
 
#include <syscall.h>
/**
/drivers/video/drm/drm_plane_helper.c
27,38 → 27,10
#include <drm/drmP.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
 
#define SUBPIXEL_MASK 0xffff
 
/**
* DOC: overview
*
* This helper library has two parts. The first part has support to implement
* primary plane support on top of the normal CRTC configuration interface.
* Since the legacy ->set_config interface ties the primary plane together with
* the CRTC state this does not allow userspace to disable the primary plane
* itself. To avoid too much duplicated code use
* drm_plane_helper_check_update(), which can be used to enforce the same
* restrictions that primary planes have always had. The default primary plane
* only exposes XRGB8888 and ARGB8888 as valid pixel formats for the attached
* framebuffer.
*
* Drivers are highly recommended to implement proper support for primary
* planes, and newly merged drivers must not rely upon these transitional
* helpers.
*
* The second part also implements transitional helpers which allow drivers to
* gradually switch to the atomic helper infrastructure for plane updates. Once
* that switch is complete drivers shouldn't use these any longer, instead using
* the proper legacy implementations for update and disable plane hooks provided
* by the atomic helpers.
*
* Again drivers are strongly urged to switch to the new interfaces.
*/
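As a usage sketch for the first part: a legacy update_plane hook would clip against the CRTC bounds via drm_plane_helper_check_update() before touching hardware. This assumes the 3.19-era signature used in this file; the NO_SCALING limits forbid scaling, as the default primary plane does:

/* Sketch: enforce primary-plane-style restrictions inside update_plane(). */
struct drm_rect src = {
	.x1 = src_x,           .y1 = src_y,            /* 16.16 fixed point */
	.x2 = src_x + src_w,   .y2 = src_y + src_h,
};
struct drm_rect dest = {
	.x1 = crtc_x,          .y1 = crtc_y,           /* integer pixels */
	.x2 = crtc_x + crtc_w, .y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
	.x2 = crtc->mode.hdisplay, .y2 = crtc->mode.vdisplay,
};
bool visible;
int ret;

ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
				    DRM_PLANE_HELPER_NO_SCALING,
				    DRM_PLANE_HELPER_NO_SCALING,
				    false, false, &visible);
if (ret)
	return ret;
if (!visible)
	return 0;	/* fully clipped away; nothing to program */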
 
/*
* This is the minimal list of formats that seem to be safe for modeset use
* with all current DRM drivers. Most hardware can actually support more
155,11 → 127,6
return -ERANGE;
}
 
if (!fb) {
*visible = false;
return 0;
}
 
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
if (!*visible)
/*
402,171 → 369,3
return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
}
EXPORT_SYMBOL(drm_crtc_init);
 
int drm_plane_helper_commit(struct drm_plane *plane,
struct drm_plane_state *plane_state,
struct drm_framebuffer *old_fb)
{
struct drm_plane_helper_funcs *plane_funcs;
struct drm_crtc *crtc[2];
struct drm_crtc_helper_funcs *crtc_funcs[2];
int i, ret = 0;
 
plane_funcs = plane->helper_private;
 
/* Since this is a transitional helper we can't assume that plane->state
* is always valid. Hence we need to use plane->crtc instead of
* plane->state->crtc as the old crtc. */
crtc[0] = plane->crtc;
crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
 
for (i = 0; i < 2; i++)
crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
 
if (plane_funcs->atomic_check) {
ret = plane_funcs->atomic_check(plane, plane_state);
if (ret)
goto out;
}
 
if (plane_funcs->prepare_fb && plane_state->fb) {
ret = plane_funcs->prepare_fb(plane, plane_state->fb);
if (ret)
goto out;
}
 
/* Point of no return, commit sw state. */
swap(plane->state, plane_state);
 
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
crtc_funcs[i]->atomic_begin(crtc[i]);
}
 
plane_funcs->atomic_update(plane, plane_state);
 
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
crtc_funcs[i]->atomic_flush(crtc[i]);
}
 
for (i = 0; i < 2; i++) {
if (!crtc[i])
continue;
 
/* There's no other way to figure out whether the crtc is running. */
ret = drm_crtc_vblank_get(crtc[i]);
if (ret == 0) {
drm_crtc_wait_one_vblank(crtc[i]);
drm_crtc_vblank_put(crtc[i]);
}
 
ret = 0;
}
 
if (plane_funcs->cleanup_fb && old_fb)
plane_funcs->cleanup_fb(plane, old_fb);
out:
if (plane_state) {
if (plane->funcs->atomic_destroy_state)
plane->funcs->atomic_destroy_state(plane, plane_state);
else
drm_atomic_helper_plane_destroy_state(plane, plane_state);
}
 
return ret;
}
 
/**
* drm_plane_helper_update() - Helper for primary plane update
* @plane: plane object to update
* @crtc: owning CRTC of owning plane
* @fb: framebuffer to flip onto plane
* @crtc_x: x offset of primary plane on crtc
* @crtc_y: y offset of primary plane on crtc
* @crtc_w: width of primary plane rectangle on crtc
* @crtc_h: height of primary plane rectangle on crtc
* @src_x: x offset of @fb for panning
* @src_y: y offset of @fb for panning
* @src_w: width of source rectangle in @fb
* @src_h: height of source rectangle in @fb
*
* Provides a default plane update handler using the atomic plane update
* functions. It is fully left to the driver to check plane constraints and
* handle corner-cases like a fully occluded or otherwise invisible plane.
*
* This is useful for piecewise transitioning of a driver to the atomic helpers.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_plane_state *plane_state;
 
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
else if (plane->state)
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
else
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
 
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, fb);
plane_state->crtc_x = crtc_x;
plane_state->crtc_y = crtc_y;
plane_state->crtc_h = crtc_h;
plane_state->crtc_w = crtc_w;
plane_state->src_x = src_x;
plane_state->src_y = src_y;
plane_state->src_h = src_h;
plane_state->src_w = src_w;
 
return drm_plane_helper_commit(plane, plane_state, plane->fb);
}
EXPORT_SYMBOL(drm_plane_helper_update);
 
/**
* drm_plane_helper_disable() - Helper for primary plane disable
* @plane: plane to disable
*
* Provides a default plane disable handler using the atomic plane update
* functions. It is fully left to the driver to check plane constraints and
* handle corner-cases like a fully occluded or otherwise invisible plane.
*
* This is useful for piecewise transitioning of a driver to the atomic helpers.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_plane_helper_disable(struct drm_plane *plane)
{
struct drm_plane_state *plane_state;
 
/* crtc helpers love to call disable functions for already disabled hw
* functions. So cope with that. */
if (!plane->crtc)
return 0;
 
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
else if (plane->state)
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
else
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
 
plane_state->crtc = NULL;
drm_atomic_set_fb_for_plane(plane_state, NULL);
 
return drm_plane_helper_commit(plane, plane_state, plane->fb);
}
EXPORT_SYMBOL(drm_plane_helper_disable);
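For drivers midway through the atomic conversion, the two helpers above plug directly into the legacy plane vfuncs; a hedged wiring sketch (example_plane_funcs is hypothetical):

/* Sketch: transitional drivers route the legacy hooks through the helpers. */
static const struct drm_plane_funcs example_plane_funcs = {
	.update_plane	= drm_plane_helper_update,
	.disable_plane	= drm_plane_helper_disable,
	.destroy	= drm_plane_cleanup,
};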
/drivers/video/drm/drm_probe_helper.c
102,8 → 102,7
mode->status = MODE_UNVERIFIED;
 
if (connector->force) {
if (connector->force == DRM_FORCE_ON ||
connector->force == DRM_FORCE_ON_DIGITAL)
if (connector->force == DRM_FORCE_ON)
connector->status = connector_status_connected;
else
connector->status = connector_status_disconnected;
/drivers/video/drm/drm_stub.c
33,6 → 33,11
#include <drm/drmP.h>
#include <drm/drm_core.h>
 
struct va_format {
const char *fmt;
va_list *va;
};
 
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
 
56,11 → 61,11
unsigned int drm_timestamp_monotonic = 1;
 
struct idr drm_minors_idr;
 
void drm_err(const char *format, ...)
int drm_err(const char *func, const char *format, ...)
{
struct va_format vaf;
va_list args;
int r;
 
va_start(args, format);
 
67,10 → 72,11
vaf.fmt = format;
vaf.va = &args;
 
printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
__builtin_return_address(0), &vaf);
r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
 
va_end(args);
 
return r;
}
EXPORT_SYMBOL(drm_err);
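Callers reach drm_err() through the DRM_ERROR() macro, and that macro is what changes shape between these two revisions. Roughly, as upstream defined the two flavours around this point (a sketch, not verbatim from this tree):

/* rev 5271 flavour: drm_err() recovers the caller itself */
#define DRM_ERROR(fmt, ...)	drm_err(fmt, ##__VA_ARGS__)

/* rev 5270 flavour: the call site passes its own name */
#define DRM_ERROR(fmt, ...)	drm_err(__func__, fmt, ##__VA_ARGS__)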
 
555,6 → 561,10
 
extern int x86_clflush_size;
 
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
 
void drm_clflush_virt_range(void *addr, unsigned long length)
{
/drivers/video/drm/drm_cache.c
29,10 → 29,15
*/
 
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <drm/drmP.h>
 
extern int x86_clflush_size;
 
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
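Both drm_stub.c and drm_cache.c now open-code clflush(); drm_clflush_virt_range() then walks the buffer one cache line at a time, with memory barriers fencing the loop. A minimal sketch of that walk, assuming x86_clflush_size is a power of two (64 on current CPUs):

/* Sketch: flush an arbitrary virtual range, cache line by cache line. */
void drm_clflush_virt_range(void *addr, unsigned long length)
{
	char *p   = (char *)((unsigned long)addr & ~(x86_clflush_size - 1));
	char *end = (char *)addr + length;

	mb();				/* order prior writes before flushing */
	for (; p < end; p += x86_clflush_size)
		clflush(p);
	mb();				/* ensure flushes complete before return */
}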
 
#if 0
static void
/drivers/video/drm/i2c/i2c-algo-bit.c
12,19 → 12,25
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA.
* ------------------------------------------------------------------------- */
 
/* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
<kmalkki@cc.hut.fi> and Jean Delvare <jdelvare@suse.de> */
<kmalkki@cc.hut.fi> and Jean Delvare <khali@linux-fr.org> */
 
#include <types.h>
#include <list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <syscall.h>
#include <linux/jiffies.h>
#include <errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <syscall.h>
 
#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */
 
43,11 → 49,11
 
/* ----- global variables --------------------------------------------- */
 
static int bit_test; /* see if the line-setting functions work */
MODULE_PARM_DESC(bit_test, "lines testing - 0 off; 1 report; 2 fail if stuck");
static int bit_test = 0; /* see if the line-setting functions work */
 
#ifdef DEBUG
static int i2c_debug = 1;
module_param(i2c_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(i2c_debug,
"debug level - 0 off; 1 normal; 2 verbose; 3 very verbose");
#endif
91,7 → 97,7
if (!adap->getscl)
goto done;
 
start = jiffies;
start = GetTimerTicks();
while (!getscl(adap)) {
/* This hw knows how to read the clock line, so we wait
* until it actually gets high. This is safer as some
98,7 → 104,7
* chips may hold it low ("clock stretching") while they
* are processing data internally.
*/
if (time_after(jiffies, start + adap->timeout)) {
if (time_after(GetTimerTicks(), start + adap->timeout)) {
/* Test one last time, as we may have been preempted
* between last check and timeout test.
*/
106,14 → 112,8
break;
return -ETIMEDOUT;
}
cpu_relax();
asm volatile("rep; nop" ::: "memory");
}
#ifdef DEBUG
if (jiffies != start && i2c_debug >= 3)
pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go "
"high\n", jiffies - start);
#endif
 
done:
udelay(adap->udelay);
return 0;
650,6 → 650,3
}
 
 
MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C-Bus bit-banging algorithm");
MODULE_LICENSE("GPL");
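The sclhi() hunk above is the SCL clock-stretch wait, with jiffies swapped for KolibriOS's GetTimerTicks(). A stripped-down sketch of the timeout pattern it implements (wait_scl_high is a hypothetical stand-in):

/* Sketch: wait for a slave to release SCL, bounded by adap->timeout ticks. */
static int wait_scl_high(struct i2c_algo_bit_data *adap)
{
	unsigned long start = GetTimerTicks();

	while (!adap->getscl(adap->data)) {
		if (time_after(GetTimerTicks(), start + adap->timeout)) {
			if (adap->getscl(adap->data))
				break;		/* released at the last moment */
			return -ETIMEDOUT;
		}
		asm volatile("rep; nop" ::: "memory");	/* cpu_relax() */
	}
	udelay(adap->udelay);
	return 0;
}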
/drivers/video/drm/i2c/i2c-core.c
27,16 → 27,46
 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <list.h>
#include <errno.h>
#include <linux/i2c.h>
#include <syscall.h>
#include <linux/jiffies.h>
#include <syscall.h>

#if 0
 
static ssize_t
297,7 → 327,7
if (ret != -EAGAIN)
break;
 
if (time_after((unsigned long)GetTimerTicks(), orig_jiffies + adap->timeout))
if (time_after(GetTimerTicks(), orig_jiffies + adap->timeout))
break;
 
delay(1);
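This last hunk is the tail of the i2c transfer retry loop: -EAGAIN (lost arbitration) is retried until adap->timeout ticks have elapsed. A condensed sketch of the whole loop, assuming the GetTimerTicks()/delay() shims used throughout this port:

/* Sketch: bounded retry loop around the adapter's master_xfer(). */
unsigned long orig_jiffies = GetTimerTicks();
int ret, try;

for (ret = 0, try = 0; try <= adap->retries; try++) {
	ret = adap->algo->master_xfer(adap, msgs, num);
	if (ret != -EAGAIN)		/* success or a hard error */
		break;
	if (time_after(GetTimerTicks(), orig_jiffies + adap->timeout))
		break;			/* kept losing arbitration; give up */
	delay(1);			/* back off one timer tick */
}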