Subversion Repositories Kolibri OS


Diff of the radeon fence driver code between Rev 3031 (old, "-" lines) and Rev 3192 (new, "+" lines)
Line 28... Line 28...
  *    Jerome Glisse
  *    Dave Airlie
  */
 #include 
 #include 
-//#include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include "radeon_reg.h"
Line 301... Line 301...
 		/* Save current last activity valuee, used to check for GPU lockups */
 		last_activity = rdev->fence_drv[ring].last_activity;
 
 //		trace_radeon_fence_wait_begin(rdev->ddev, seq);
 		radeon_irq_kms_sw_irq_get(rdev, ring);
-//       if (intr) {
-//           r = wait_event_interruptible_timeout(rdev->fence_queue,
-//               (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-//               timeout);
-//                } else {
-//           r = wait_event_timeout(rdev->fence_queue,
-//               (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-//               timeout);
-//   }
-        delay(1);
-
+		if (intr) {
+			r = wait_event_interruptible_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+				timeout);
+		} else {
+			r = wait_event_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+				timeout);
+		}
 		radeon_irq_kms_sw_irq_put(rdev, ring);
-//       if (unlikely(r < 0)) {
-//           return r;
-//       }
+		if (unlikely(r < 0)) {
+			return r;
+		}
 //		trace_radeon_fence_wait_end(rdev->ddev, seq);
 
 		if (unlikely(!signaled)) {
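
On the Rev 3192 side this hunk drops the KolibriOS placeholder (a delay(1) busy poll with the real wait commented out) and re-enables the upstream sleep on rdev->fence_queue. The re-enabled "if (unlikely(r < 0))" check relies on the standard return convention of the Linux wait_event_*_timeout macros; the following is a minimal sketch of that convention, assuming kernel context (example_wait, queue and done are placeholder names, not driver API):

    #include <linux/wait.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Sketch only: how the wait_event_*_timeout return value is read.
     * "queue" and "done" stand in for rdev->fence_queue and the
     * signaled condition used in the real code. */
    static long example_wait(wait_queue_head_t *queue, bool *done,
                             long timeout, bool intr)
    {
            long r;

            if (intr)
                    /* < 0: interrupted by a signal (-ERESTARTSYS),
                     *   0: timed out with the condition still false,
                     * > 0: condition became true, remaining jiffies */
                    r = wait_event_interruptible_timeout(*queue, *done, timeout);
            else
                    /* same meaning, but can never return < 0 */
                    r = wait_event_timeout(*queue, *done, timeout);

            return r;       /* callers bail out early only when r < 0 */
    }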
Line 472... Line 470...
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 			if (target_seq[i]) {
 				radeon_irq_kms_sw_irq_get(rdev, i);
 			}
 		}
-
-//        WaitEvent(fence->evnt);
-
-		r = 1;
-
+		if (intr) {
+			r = wait_event_interruptible_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+				timeout);
+		} else {
+			r = wait_event_timeout(rdev->fence_queue,
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+				timeout);
+		}
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 			if (target_seq[i]) {
 				radeon_irq_kms_sw_irq_put(rdev, i);
 			}
 		}
Line 604... Line 606...
  *
  * Wait for all fences on the requested ring to signal (all asics).
  * Returns 0 if the fences have passed, error for all other cases.
  * Caller must hold ring lock.
  */
-void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
-
-	while(1) {
 	int r;
+
 	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	if (r) {
 		if (r == -EDEADLK) {
-			mutex_unlock(&rdev->ring_lock);
-			r = radeon_gpu_reset(rdev);
-			mutex_lock(&rdev->ring_lock);
-			if (!r)
-				continue;
+			return -EDEADLK;
 	}
-		if (r) {
-			dev_err(rdev->dev, "error waiting for ring to become"
-				" idle (%d)\n", r);
-		}
-		return;
+		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+			ring, r);
 	}
+	return 0;
 }
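
With the return type changed from void to int, the lockup recovery (unlock the ring lock, radeon_gpu_reset(), retry) moves out of this helper and the caller now learns whether the ring actually drained. Below is a rough caller-side sketch modelled on the radeon_fence_driver_fini() change further down in this diff; the function name and the dev_warn policy are illustrative, not driver API:

    /* Assumes the driver's own headers are in scope so that
     * struct radeon_device, rdev->ring_lock and rdev->dev exist,
     * exactly as they are used elsewhere in this file. */
    static int example_idle_ring(struct radeon_device *rdev, int ring)
    {
            int r;

            mutex_lock(&rdev->ring_lock);   /* the docblock requires the ring lock */
            r = radeon_fence_wait_empty_locked(rdev, ring);
            mutex_unlock(&rdev->ring_lock);

            if (r)  /* -EDEADLK on a suspected lockup, other errno on failure */
                    dev_warn(rdev->dev, "ring %d did not idle (%d)\n", ring, r);
            return r;       /* 0 once every emitted fence has signaled */
    }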
Line 630... Line 626...
 
 /**
  * radeon_fence_ref - take a ref on a fence
Line 767... Line 763...
 {
 	uint64_t index;
 	int r;
 
 	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
-	if (rdev->wb.use_event) {
+	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
 		rdev->fence_drv[ring].scratch_reg = 0;
 		index = R600_WB_EVENT_OFFSET + ring * 4;
 	} else {
 		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
Line 849... Line 845...
  *
  * Tear down the fence driver for all possible rings (all asics).
  */
 void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
-	int ring;
+	int ring, r;
 
 	mutex_lock(&rdev->ring_lock);
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		if (!rdev->fence_drv[ring].initialized)
 			continue;
-		radeon_fence_wait_empty_locked(rdev, ring);
+		r = radeon_fence_wait_empty_locked(rdev, ring);
+		if (r) {
+			/* no need to trigger GPU reset as we are unloading */
+			radeon_fence_driver_force_completion(rdev);
+		}
 		wake_up_all(&rdev->fence_queue);
 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 		rdev->fence_drv[ring].initialized = false;
 	}
 	mutex_unlock(&rdev->ring_lock);
+}
+
+/**
+ * radeon_fence_driver_force_completion - force all fence waiter to complete
+ *
+ * @rdev: radeon device pointer
+ *
+ * In case of GPU reset failure make sure no process keep waiting on fence
+ * that will never complete.
+ */
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+{
+	int ring;
+
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
+	}
 }
 
 
 /*
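
The new radeon_fence_driver_force_completion() writes each ring's last emitted sequence number (sync_seq[ring]) into that ring's fence location, which is why radeon_fence_driver_fini() can fall back to it when the wait fails during unload: once the stored value has caught up with everything ever emitted, any remaining waiter sees its fence as signaled the next time the wait condition is re-evaluated. The comparison below is illustrative only, assuming the usual "signaled once the read-back value has reached the fence's sequence number" rule; read_back and fence_seq are placeholder names, not driver fields:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: after the forced write, read_back equals the
     * highest sequence number ever emitted, so this test holds for
     * every outstanding fence and the waiters can finish. */
    static bool example_fence_signaled(uint64_t read_back, uint64_t fence_seq)
    {
            return read_back >= fence_seq;
    }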