Subversion Repositories Kolibri OS


Diff of radeon_fence.c: Rev 3764 vs Rev 5078
Line 35... Line 35...
 #include <...>
 #include <...>
 #include <...>
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_trace.h"
Line 40... Line 41...
 
 /*
  * Fences
  * Fences mark an event in the GPUs pipeline and are used
Line 118... Line 119...
 	kref_init(&((*fence)->kref));
 	(*fence)->rdev = rdev;
 	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
 	(*fence)->ring = ring;
 	radeon_fence_ring_emit(rdev, ring, *fence);
-//   trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
+	trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
 	return 0;
 }
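The emit path above is where a fence gets its identity: each ring owns a monotonically increasing 64-bit counter, and the fence is stamped with ++rdev->fence_drv[ring].sync_seq[ring] before the fence command is queued on the ring. Rev 5078 also enables the emit tracepoint (previously commented out) and passes the ring index to it. A minimal sketch of the numbering scheme, with hypothetical names standing in for the driver's real structures:

#include <stdint.h>

#define NUM_RINGS 8	/* stands in for RADEON_NUM_RINGS */

/* Hypothetical, stripped-down fence driver state: one monotonically
 * increasing 64-bit counter per ring. */
struct fence_drv {
	uint64_t sync_seq[NUM_RINGS];
};

/* Stamp a new fence on 'ring'.  The value is strictly greater than
 * that of every fence previously emitted on the ring, so "signaled"
 * can later be decided by a single 64-bit comparison. */
static uint64_t fence_alloc_seq(struct fence_drv *drv, int ring)
{
	return ++drv->sync_seq[ring];
}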
Line 126... Line 127...
 
 /**
Line 187... Line 188...
 			 */
 			break;
 		}
 	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
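The do/while above is the lock-free heart of fence processing: any thread (interrupt handler or waiter) may run it concurrently. Each caller reads the sequence value the GPU last wrote and publishes it with atomic64_xchg(); if the exchange returns a value larger than what this caller read, a racing caller had already published a newer value and was momentarily overwritten with an older one, so the loop re-reads and tries again. A compilable sketch of the same pattern in C11 atomics (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t hw_value;	/* stub: fence value the GPU writes back */
static _Atomic uint64_t last_seq;	/* ~ rdev->fence_drv[ring].last_seq */

static uint64_t read_hw_fence_value(void)
{
	return atomic_load(&hw_value);	/* hardware counter only grows */
}

static bool process_fences(void)
{
	uint64_t seq, old;
	bool wake = false;

	do {
		seq = read_hw_fence_value();
		old = atomic_exchange(&last_seq, seq);
		if (seq > old)
			wake = true;	/* new fences really did signal */
		/* If old > seq, a racing caller published a newer value
		 * that we just clobbered; because the hardware counter is
		 * monotonic, re-reading restores it on the next pass. */
	} while (old > seq);

	return wake;	/* caller would wake_up_all() on true */
}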
Line 191... Line 192...
 
-	if (wake) {
-		rdev->fence_drv[ring].last_activity = GetTimerTicks();
+	if (wake)
 		wake_up_all(&rdev->fence_queue);
-	}
Line 196... Line 195...
 }
 
 /**
  * radeon_fence_destroy - destroy a fence
Line 209... Line 208...
 	fence = container_of(kref, struct radeon_fence, kref);
 	kfree(fence);
 }
Line 212... Line 211...
 
 /**
- * radeon_fence_seq_signaled - check if a fence sequeuce number has signaled
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
  *
  * @rdev: radeon device pointer
  * @seq: sequence number
  * @ring: ring index the fence is associated with
  *
- * Check if the last singled fence sequnce number is >= the requested
+ * Check if the last signaled fence sequnce number is >= the requested
  * sequence number (all asics).
  * Returns true if the fence has signaled (current fence value
  * is >= requested value) or false if it has not (current fence
  * value is < the requested value.  Helper function for
Line 260... Line 259...
 	}
 	return false;
 }
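Because sequence numbers are 64-bit and strictly increasing, "has fence N signaled?" reduces to one unsigned comparison against the last value the GPU wrote; no wrap-around handling is needed in any realistic lifetime. A minimal sketch of the contract, with hypothetical names (the real helper additionally re-runs radeon_fence_process() and checks a second time before giving up):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t last_seq;	/* last value the GPU wrote back */

/* True once the GPU has reached or passed the requested fence. */
static bool seq_signaled(uint64_t seq)
{
	return atomic_load(&last_seq) >= seq;
}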
Line 263... Line 262...
 
 /**
- * radeon_fence_wait_seq - wait for a specific sequence number
+ * radeon_fence_any_seq_signaled - check if any sequence number is signaled
  *
  * @rdev: radeon device pointer
- * @target_seq: sequence number we want to wait for
- * @ring: ring index the fence is associated with
- * @intr: use interruptable sleep
- * @lock_ring: whether the ring should be locked or not
+ * @seq: sequence numbers
  *
- * Wait for the requested sequence number to be written (all asics).
+ * Check if the last signaled fence sequnce number is >= the requested
+ * sequence number (all asics).
+ * Returns true if any has signaled (current value is >= requested value)
+ * or false if it has not. Helper function for radeon_fence_wait_seq.
+ */
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_seq - wait for a specific sequence numbers
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
+ * @intr: use interruptable sleep
+ *
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics).  Sequnce number array is indexed by ring id.
  * @intr selects whether to use interruptable (true) or non-interruptable
  * (false) sleep when waiting for the sequence number.  Helper function
- * for radeon_fence_wait(), et al.
+ * for radeon_fence_wait_*().
  * Returns 0 if the sequence number has passed, error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected and the ring is
- * marked as not ready so no further jobs get scheduled until a successful
- * reset.
+ * -EDEADLK is returned when a GPU lockup has been detected.
  */
-static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
-				 unsigned ring, bool intr, bool lock_ring)
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
+				 bool intr)
 {
-	unsigned long timeout, last_activity;
-	uint64_t seq;
-	unsigned i;
+	uint64_t last_seq[RADEON_NUM_RINGS];
 	bool signaled;
-	int r;
-
-	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
-		if (!rdev->ring[ring].ready) {
-			return -EBUSY;
-		}
-
-		timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
-		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
-			/* the normal case, timeout is somewhere before last_activity */
-			timeout = rdev->fence_drv[ring].last_activity - timeout;
-		} else {
-			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
-			 * anyway we will just wait for the minimum amount and then check for a lockup
-			 */
-			timeout = 1;
-		}
-		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
-		/* Save current last activity valuee, used to check for GPU lockups */
-		last_activity = rdev->fence_drv[ring].last_activity;
-
-//		trace_radeon_fence_wait_begin(rdev->ddev, seq);
-		radeon_irq_kms_sw_irq_get(rdev, ring);
+	int i, r;
+
+	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+
+		/* Save current sequence values, used to check for GPU lockups */
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (!target_seq[i])
+				continue;
+
+			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
+			trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
+			radeon_irq_kms_sw_irq_get(rdev, i);
+		}
+
 		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-				timeout);
+			r = wait_event_interruptible_timeout(rdev->fence_queue, (
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
 		} else {
-			r = wait_event_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
-				timeout);
-		}
-		radeon_irq_kms_sw_irq_put(rdev, ring);
-		if (unlikely(r < 0)) {
-			return r;
-		}
-//		trace_radeon_fence_wait_end(rdev->ddev, seq);
+			r = wait_event_timeout(rdev->fence_queue, (
+				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
+				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
+		}
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (!target_seq[i])
+				continue;
+
+			radeon_irq_kms_sw_irq_put(rdev, i);
+			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
+		}
+
+		if (unlikely(r < 0))
+			return r;
 
 		if (unlikely(!signaled)) {
+			if (rdev->needs_reset)
+				return -EDEADLK;
+
 			/* we were interrupted for some reason and fence
 			 * isn't signaled yet, resume waiting */
-			if (r) {
+			if (r)
 				continue;
-			}
-
-			/* check if sequence value has changed since last_activity */
-			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
-				continue;
-			}
-
-			if (lock_ring) {
-				mutex_lock(&rdev->ring_lock);
-			}
-
-			/* test if somebody else has already decided that this is a lockup */
-			if (last_activity != rdev->fence_drv[ring].last_activity) {
-				if (lock_ring) {
-					mutex_unlock(&rdev->ring_lock);
-				}
-				continue;
-			}
-
-			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
-				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
-					 target_seq, seq);
-
-				/* change last activity so nobody else think there is a lockup */
-				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-					rdev->fence_drv[i].last_activity = GetTimerTicks();
-				}
-
-				/* mark the ring as not ready any more */
-				rdev->ring[ring].ready = false;
-				if (lock_ring) {
-					mutex_unlock(&rdev->ring_lock);
+
+			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (!target_seq[i])
+					continue;
+
+				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
+					break;
+			}
+
+			if (i != RADEON_NUM_RINGS)
+				continue;
+
+			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (!target_seq[i])
+					continue;
+
+				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
+					break;
+			}
+
+			if (i < RADEON_NUM_RINGS) {
+				/* good news we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for "
+					 "0x%016llx last fence id 0x%016llx on"
+					 " ring %d)\n",
Line 385... Line 393...
  * (false) sleep when waiting for the fence.
  * Returns 0 if the fence has passed, error for all other cases.
  */
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
Line 391... Line 400...
 
 	if (fence == NULL) {
 		WARN(1, "Querying an invalid fence : %p !\n", fence);
 		return -EINVAL;
Line 395... Line -...
 	}
 
-	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
-				  fence->ring, intr, true);
-	if (r) {
-		return r;
-	}
-	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
-		return 0;
-}
+	seq[fence->ring] = fence->seq;
+	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+		return 0;
+
+	r = radeon_fence_wait_seq(fence->rdev, seq, intr);
+	if (r)
+		return r;
+
+	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
 
-static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
-{
-	unsigned i;
-
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
-			return true;
-		}
-	}
-	return false;
-}
-
-/**
- * radeon_fence_wait_any_seq - wait for a sequence number on any ring
- *
- * @rdev: radeon device pointer
- * @target_seq: sequence number(s) we want to wait for
- * @intr: use interruptable sleep
- *
- * Wait for the requested sequence number(s) to be written by any ring
- * (all asics).  Sequnce number array is indexed by ring id.
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the sequence number.  Helper function
- * for radeon_fence_wait_any(), et al.
- * Returns 0 if the sequence number has passed, error for all other cases.
- */
-static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
-				     u64 *target_seq, bool intr)
-{
-	unsigned long timeout, last_activity, tmp;
-	unsigned i, ring = RADEON_NUM_RINGS;
-	bool signaled;
-	int r;
-
-	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (!target_seq[i]) {
-			continue;
-		}
-
-		/* use the most recent one as indicator */
-		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
-			last_activity = rdev->fence_drv[i].last_activity;
-		}
-
-		/* For lockup detection just pick the lowest ring we are
-		 * actively waiting for
-		 */
-		if (i < ring) {
-			ring = i;
-		}
-	}
-
-	/* nothing to wait for ? */
-	if (ring == RADEON_NUM_RINGS) {
-		return -ENOENT;
-	}
-
-	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
-		timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
-		if (time_after(last_activity, timeout)) {
-			/* the normal case, timeout is somewhere before last_activity */
-			timeout = last_activity - timeout;
-		} else {
-			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
-			 * anyway we will just wait for the minimum amount and then check for a lockup
-			 */
-			timeout = 1;
-		}
-
-//		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (target_seq[i]) {
-				radeon_irq_kms_sw_irq_get(rdev, i);
-			}
-		}
-		if (intr) {
-			r = wait_event_interruptible_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
-				timeout);
-		} else {
-			r = wait_event_timeout(rdev->fence_queue,
-				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
-				timeout);
-		}
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			if (target_seq[i]) {
-				radeon_irq_kms_sw_irq_put(rdev, i);
-			}
-		}
-		if (unlikely(r < 0)) {
-			return r;
-		}
-//   trace_radeon_fence_wait_end(rdev->ddev, seq);
-
-		if (unlikely(!signaled)) {
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r) {
-				continue;
-			}
-
-			mutex_lock(&rdev->ring_lock);
-			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
-				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
-					tmp = rdev->fence_drv[i].last_activity;
-				}
-			}
-			/* test if somebody else has already decided that this is a lockup */
-			if (last_activity != tmp) {
-				last_activity = tmp;
-				mutex_unlock(&rdev->ring_lock);
-				continue;
-			}
-
-			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
-				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
-					 target_seq[ring]);
-
-				/* change last activity so nobody else think there is a lockup */
-				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-					rdev->fence_drv[i].last_activity = GetTimerTicks();
-				}
-
-				/* mark the ring as not ready any more */
-				rdev->ring[ring].ready = false;
-				mutex_unlock(&rdev->ring_lock);
-				return -EDEADLK;
-			}
-			mutex_unlock(&rdev->ring_lock);
-		}
-	}
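With radeon_fence_wait_any_seq() gone, every waiter now funnels through the one array-based helper. The calling convention: build a u64 array indexed by ring id, where a zero slot means "nothing to wait for on that ring"; waiting for a single fence is simply an array with one non-zero entry, as the rewritten radeon_fence_wait() above shows. A minimal sketch of that convention (wait_seq() here is a hypothetical stand-in, stubbed so the example is self-contained):

#include <stdbool.h>
#include <stdint.h>

#define NUM_RINGS 8	/* stands in for RADEON_NUM_RINGS */

/* Stand-in for radeon_fence_wait_seq(); the real helper sleeps until
 * one of the requested sequence numbers signals. */
static int wait_seq(const uint64_t target_seq[NUM_RINGS], bool intr)
{
	(void)target_seq;
	(void)intr;
	return 0;
}

/* Waiting for one specific fence is just an array with one non-zero
 * slot; a zero slot means "do not wait on this ring". */
static int wait_one(int ring, uint64_t seq, bool intr)
{
	uint64_t target[NUM_RINGS] = {0};

	target[ring] = seq;
	return wait_seq(target, intr);
}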
Line 554... Line 431...
 int radeon_fence_wait_any(struct radeon_device *rdev,
 			  struct radeon_fence **fences,
 			  bool intr)
 {
 	uint64_t seq[RADEON_NUM_RINGS];
-	unsigned i;
+	unsigned i, num_rings = 0;
 	int r;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		seq[i] = 0;
 
 		if (!fences[i]) {
 			continue;
 		}
 
-		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
-			/* something was allready signaled */
-			return 0;
-		}
-
 		seq[i] = fences[i]->seq;
+		++num_rings;
+
+		/* test if something was allready signaled */
+		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
+			return 0;
 	}
 
-	r = radeon_fence_wait_any_seq(rdev, seq, intr);
+	/* nothing to wait for ? */
+	if (num_rings == 0)
+		return -ENOENT;
+
+	r = radeon_fence_wait_seq(rdev, seq, intr);
 	if (r) {
 		return r;
 	}
 	return 0;
 }
 
 /**
- * radeon_fence_wait_next_locked - wait for the next fence to signal
+ * radeon_fence_wait_next - wait for the next fence to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
  *
  * Wait for the next fence on the requested ring to signal (all asics).
  * Returns 0 if the next fence has passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
-	uint64_t seq;
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 
-	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
-	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
 		   already the last emited fence */
 		return -ENOENT;
 	}
-	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	return radeon_fence_wait_seq(rdev, seq, false);
 }
 
 /**
- * radeon_fence_wait_empty_locked - wait for all fences to signal
+ * radeon_fence_wait_empty - wait for all fences to signal
  *
  * @rdev: radeon device pointer
  * @ring: ring index the fence is associated with
  *
  * Wait for all fences on the requested ring to signal (all asics).
  * Returns 0 if the fences have passed, error for all other cases.
  * Caller must hold ring lock.
  */
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 {
-	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+	uint64_t seq[RADEON_NUM_RINGS] = {};
 	int r;
 
-	r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
+	if (!seq[ring])
+		return 0;
+
+	r = radeon_fence_wait_seq(rdev, seq, false);
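The rewritten wait_next/wait_empty pair shows the pay-off of the array convention: "wait for the next fence" targets last signaled + 1, and "wait until the ring is idle" targets the last emitted value, each expressed as a one-slot array against the same helper. A hedged sketch of the wait_next case, with hypothetical bookkeeping names and a stubbed wait_seq():

#include <stdbool.h>
#include <stdint.h>

#define NUM_RINGS 8	/* stands in for RADEON_NUM_RINGS */

/* Hypothetical per-ring bookkeeping. */
static uint64_t last_signaled[NUM_RINGS];	/* last seq seen from the GPU */
static uint64_t last_emitted[NUM_RINGS];	/* last seq handed out */

/* Stand-in for radeon_fence_wait_seq(). */
static int wait_seq(const uint64_t target[NUM_RINGS], bool intr)
{
	(void)target;
	(void)intr;
	return 0;
}

/* "Wait for the next fence": ask for last signaled + 1, bailing out
 * early when the >= test (mirroring radeon_fence_wait_next() above)
 * says there is nothing suitable in flight. */
static int wait_next(int ring)
{
	uint64_t target[NUM_RINGS] = {0};

	target[ring] = last_signaled[ring] + 1;
	if (target[ring] >= last_emitted[ring])
		return -2;	/* ~ -ENOENT */
	return wait_seq(target, false);
}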
Line 823... Line 708...
 	rdev->fence_drv[ring].cpu_addr = NULL;
 	rdev->fence_drv[ring].gpu_addr = 0;
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		rdev->fence_drv[ring].sync_seq[i] = 0;
 	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
-	rdev->fence_drv[ring].last_activity = GetTimerTicks();
 	rdev->fence_drv[ring].initialized = false;
 }
Line 831... Line 715...
 
 /**
Line 869... Line 753...
 
 	mutex_lock(&rdev->ring_lock);
 	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
 		if (!rdev->fence_drv[ring].initialized)
 			continue;
-		r = radeon_fence_wait_empty_locked(rdev, ring);
+		r = radeon_fence_wait_empty(rdev, ring);
 		if (r) {
 			/* no need to trigger GPU reset as we are unloading */
 			radeon_fence_driver_force_completion(rdev);
 		}
Line 916... Line 800...
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		if (!rdev->fence_drv[i].initialized)
 			continue;
 
+		radeon_fence_process(rdev, i);
+
 		seq_printf(m, "--- ring %d ---\n", i);
 		seq_printf(m, "Last signaled fence 0x%016llx\n",
 			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
Line 931... Line 817...
 		}
 	}
 	return 0;
 }
Line -... Line 821...
 
+/**
+ * radeon_debugfs_gpu_reset - manually trigger a gpu reset
+ *
+ * Manually trigger a gpu reset at the next fence wait.
+ */
+static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	down_read(&rdev->exclusive_lock);
+	seq_printf(m, "%d\n", rdev->needs_reset);
+	rdev->needs_reset = true;
+	up_read(&rdev->exclusive_lock);
+
+	return 0;
+}
+
 static struct drm_info_list radeon_debugfs_fence_list[] = {
 	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
+	{"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
 };
Line 939... Line 845...
 #endif
 
 int radeon_debugfs_fence_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
+	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
 #else
 	return 0;