Subversion Repositories Kolibri OS

Diff between Rev 1403 and Rev 1404
@@ -270,14 +270,20 @@
 		return RREG32(RADEON_CRTC_CRNT_FRAME);
 	else
 		return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }
 
+/* Who ever call radeon_fence_emit should call ring_lock and ask
+ * for enough space (today caller are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	/* Who ever call radeon_fence_emit should call ring_lock and ask
-	 * for enough space (today caller are ib schedule and buffer move) */
+	/* We have to make sure that caches are flushed before
+	 * CPU might read something from VRAM. */
+	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
 	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
 	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
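
The new code in this hunk moves the ring_lock comment ahead of r100_fence_ring_emit() and, before the fence itself is emitted, flushes the 3D destination cache and Z cache and waits for the engine to report IDLE & CLEAN, so the CPU cannot see the fence signalled while stale data still sits in the GPU caches. Below is a minimal sketch of that flush idiom, kept deliberately close to the hunk: every call and register name appears in the diff above, only the helper function name is hypothetical, and PACKET0(reg, 0) is the driver's type-0 packet header for writing a single value into the named register.

/* Sketch only: assumes the driver's own headers (radeon.h and friends).
 * The caller is expected to hold the ring lock and to have reserved
 * enough ring space, per the comment moved above the function. */
static void example_flush_caches_before_fence(struct radeon_device *rdev)
{
	/* Flush the 3D destination (color) cache. */
	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
	/* Flush the 3D Z (depth) cache. */
	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
	/* Wait until IDLE & CLEAN, as in the tail of the hunk above
	 * (register 0x1720, bits 16 and 17). */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
}
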
@@ -341,11 +347,17 @@
 {
 	int r;
 
 	r100_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
-//       radeon_object_kunmap(rdev->wb.wb_obj);
-//       radeon_object_unpin(rdev->wb.wb_obj);
-//       radeon_object_unref(&rdev->wb.wb_obj);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			dev_err(rdev->dev, "(%d) can't finish WB\n", r);
+			return;
+		}
+		radeon_bo_kunmap(rdev->wb.wb_obj);
+		radeon_bo_unpin(rdev->wb.wb_obj);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
+		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
 	}
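
In r100_wb_fini() the previously commented-out radeon_object_*() calls are replaced with the radeon_bo_*() API and an error path is added: the write-back buffer object is reserved before it is unmapped and unpinned, then unreserved, and finally its reference is dropped and the cached pointers are cleared. Below is a minimal sketch of that teardown order, on the assumption that the same sequence applies to any pinned and kernel-mapped buffer object in this driver; the helper name, its parameters, and the error message are hypothetical, while the radeon_bo_*() calls are exactly the ones in the hunk.

/* Sketch only: assumes the driver's own headers (radeon.h and friends). */
static void example_bo_teardown(struct radeon_device *rdev, struct radeon_bo **bo)
{
	int r;

	if (*bo == NULL)
		return;
	/* The object must be reserved before it may be unmapped or unpinned. */
	r = radeon_bo_reserve(*bo, false);
	if (unlikely(r != 0)) {
		dev_err(rdev->dev, "(%d) can't reserve BO for teardown\n", r);
		return;
	}
	radeon_bo_kunmap(*bo);		/* drop the kernel CPU mapping */
	radeon_bo_unpin(*bo);		/* allow the object to be moved again */
	radeon_bo_unreserve(*bo);
	radeon_bo_unref(bo);		/* drop the reference held on the object */
}

The hunk then clears rdev->wb.wb and rdev->wb.wb_obj, so a later r100_wb_fini() call finds nothing left to release.
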
@@ -530,9 +542,8 @@
 		rdev->me_fw = NULL;
 	}
 	return err;
 }
-
 
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
@@ -2812 +2823 @@
 		if (r)
 			return r;
 	}
 	/* Enable IRQ */
 //   r100_irq_set(rdev);
+	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 //   r = r100_cp_init(rdev, 1024 * 1024);
 //   if (r) {
 //       dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
 //       return r;