Subversion Repositories Kolibri OS


Rev 5271 → Rev 6104
Line 266... Line 266...
 		WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
 		WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
 	}
 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
 	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+
+	/* FIXME use something else than big hammer but after few days can not
+	 * seem to find good combination so reset SDMA blocks as it seems we
+	 * do not shut them down properly. This fix hibernation and does not
+	 * affect suspend to ram.
+	 */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+	(void)RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+	(void)RREG32(SRBM_SOFT_RESET);
 }
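
Rev 6104 ends this stop path with a soft reset of both SDMA blocks, per the FIXME above. The sequence is the usual way to pulse an SRBM soft-reset bit: assert, post the write with a read-back, wait, deassert, post again. A minimal sketch of that pattern, factored into a hypothetical helper (the helper name is illustrative; the registers and accessors are the driver's own):

	static void cik_sdma_soft_reset(struct radeon_device *rdev)
	{
		/* assert soft reset on both SDMA engines */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
		(void)RREG32(SRBM_SOFT_RESET);	/* read back so the posted write lands */
		udelay(50);			/* give the blocks time to reset */
		/* deassert the reset and post it the same way */
		WREG32(SRBM_SOFT_RESET, 0);
		(void)RREG32(SRBM_SOFT_RESET);
	}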
Line 272... Line 283...
 
 /**
  * cik_sdma_rlc_stop - stop the compute async dma engines
Line 281... Line 292...
 {
 	/* XXX todo */
 }
Line 284... Line 295...
 
+/**
+ * cik_sdma_ctx_switch_enable - enable/disable sdma engine preemption
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable/disable preemption.
+ *
+ * Halt or unhalt the async dma engines (CIK).
+ */
+static void cik_sdma_ctx_switch_enable(struct radeon_device *rdev, bool enable)
+{
+	uint32_t reg_offset, value;
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			reg_offset = SDMA0_REGISTER_OFFSET;
+		else
+			reg_offset = SDMA1_REGISTER_OFFSET;
+		value = RREG32(SDMA0_CNTL + reg_offset);
+		if (enable)
+			value |= AUTO_CTXSW_ENABLE;
+		else
+			value &= ~AUTO_CTXSW_ENABLE;
+		WREG32(SDMA0_CNTL + reg_offset, value);
+	}
+}
+
 /**
  * cik_sdma_enable - stop the async dma engines
  *
  * @rdev: radeon_device pointer
  * @enable: enable/disable the DMA MEs.
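
The new cik_sdma_ctx_switch_enable() is a read-modify-write of the AUTO_CTXSW_ENABLE bit in each engine's copy of SDMA0_CNTL; both engines expose the same register layout at different base offsets, so SDMA0_CNTL plus a per-engine offset addresses either one. An equivalent form of the loop using an offset table (a sketch, not in the source):

	static const u32 sdma_offsets[] = {
		SDMA0_REGISTER_OFFSET,
		SDMA1_REGISTER_OFFSET,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(sdma_offsets); i++) {
		u32 value = RREG32(SDMA0_CNTL + sdma_offsets[i]);

		if (enable)
			value |= AUTO_CTXSW_ENABLE;
		else
			value &= ~AUTO_CTXSW_ENABLE;
		WREG32(SDMA0_CNTL + sdma_offsets[i], value);
	}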
Line 310... Line 348...
 			me_cntl &= ~SDMA_HALT;
 		else
 			me_cntl |= SDMA_HALT;
 		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
 	}
+
+	cik_sdma_ctx_switch_enable(rdev, enable);
 }
Line 316... Line 356...
 
 /**
  * cik_sdma_gfx_resume - setup and start the async dma engines
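
With this hunk, halting or unhalting the DMA MEs also turns automatic context switching off or on in one place. Pieced together from the context lines above, the per-engine loop of cik_sdma_enable() now plausibly reads as follows (everything outside the hunk is reconstruction, not shown in this diff; any prologue above the loop is omitted):

	for (i = 0; i < 2; i++) {
		if (i == 0)
			reg_offset = SDMA0_REGISTER_OFFSET;
		else
			reg_offset = SDMA1_REGISTER_OFFSET;
		me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
		if (enable)
			me_cntl &= ~SDMA_HALT;	/* clear HALT: engine runs */
		else
			me_cntl |= SDMA_HALT;	/* set HALT: engine stops */
		WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
	}

	cik_sdma_ctx_switch_enable(rdev, enable);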
Line 814... Line 854...
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			ib->ptr[ib->length_dw++] = ndw;
-			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+		ib->ptr[ib->length_dw++] = ndw;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 			if (flags & R600_PTE_SYSTEM) {
 				value = radeon_vm_map_gart(rdev, addr);
-				value &= 0xFFFFFFFFFFFFF000ULL;
 			} else if (flags & R600_PTE_VALID) {
 				value = addr;
 			} else {
 				value = 0;
 			}
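
Two things change here: lines 814-816 lose one level of indentation, and the masking of the GART lookup result is dropped. The removed line aligned the returned address down to a 4 KiB page boundary before it was written into the page-table entry; presumably the newer radeon_vm_map_gart() returns an already page-aligned value (an assumption; the callee is outside this diff). What the removed line did, as a hypothetical helper:

	/* clear the low 12 bits so the PTE points at a 4 KiB page boundary */
	static inline u64 pte_page_align(u64 addr)
	{
		return addr & 0xFFFFFFFFFFFFF000ULL;
	}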
Line 901... Line 940...
  * using sDMA (CIK).
  */
 void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
 		      unsigned vm_id, uint64_t pd_addr)
 {
+	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 	if (vm_id < 8) {
 		radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
 	} else {
 		radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
Line 941... Line 983...
 
 	/* flush TLB */
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
 	radeon_ring_write(ring, 1 << vm_id);
+
+	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0); /* reference */
+	radeon_ring_write(ring, 0); /* mask */
+	radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
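
Together, this hunk and the extra_bits setup above make cik_dma_vm_flush() wait for the TLB invalidate instead of just firing it: after 1 << vm_id is written to VM_INVALIDATE_REQUEST, the ring carries a POLL_REG_MEM packet against the same register. With compare function 0 ("always", per the inline comment) and a zero mask, the poll matches on the first read; its point is to force the SDMA engine to read the register back, so the invalidate write should have landed before later packets run. Dword layout of the packet as emitted above (a reading of the code, not separate documentation):

	/* dw0 */ SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)
	/* dw1 */ VM_INVALIDATE_REQUEST >> 2	/* register to poll */
	/* dw2 */ 0
	/* dw3 */ 0				/* reference value */
	/* dw4 */ 0				/* mask */
	/* dw5 */ (0xfff << 16) | 10		/* retry count, poll interval */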