Subversion Repositories — Kolibri OS

radeon_fence.c, diff between Rev 2005 and Rev 2997

--- radeon_fence.c	(Rev 2005)
+++ radeon_fence.c	(Rev 2997)
@@ -32 +32 @@
 #include <...>
 //#include <...>
 #include <...>
 #include <...>
 #include <...>
-#include "drmP.h"
-#include "drm.h"
+#include <...>
 #include "radeon_reg.h"
 #include "radeon.h"
@@ -41 +40 @@
 
+/*
+ * Fences
+ * Fences mark an event in the GPUs pipeline and are used
+ * for GPU/CPU synchronization.  When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.  Whether
+ * we use a scratch register or memory location depends on the asic
+ * and whether writeback is enabled.
+ */
+
+/**
+ * radeon_fence_write - write a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @seq: sequence number to write
+ * @ring: ring index the fence is associated with
+ *
+ * Writes a fence value to memory or a scratch register (all asics).
+ */
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
-{
-	if (rdev->wb.enabled) {
-		u32 scratch_index;
-		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);;
-	} else
-		WREG32(rdev->fence_drv.scratch_reg, seq);
-}
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
+{
+	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+		*drv->cpu_addr = cpu_to_le32(seq);
+	} else {
+		WREG32(drv->scratch_reg, seq);
+	}
+}
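The new write path picks between two backing stores: when writeback is enabled the fence value is stored little-endian in a CPU-visible slot, otherwise it goes to an MMIO scratch register. A minimal standalone sketch of that dispatch, with invented stand-ins for the driver's structures and for WREG32():

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical stand-ins for the driver's per-ring fence bookkeeping */
	struct fence_drv {
	    uint32_t *cpu_addr;    /* writeback slot, CPU-visible       */
	    uint32_t  scratch_reg; /* MMIO register offset, 0 = unused  */
	};

	static uint32_t mmio[256]; /* fake register file for the sketch */

	static void fence_write(struct fence_drv *drv, int wb_enabled, uint32_t seq)
	{
	    if (wb_enabled || !drv->scratch_reg)
	        *drv->cpu_addr = seq;         /* driver also byte-swaps via cpu_to_le32() */
	    else
	        mmio[drv->scratch_reg] = seq; /* stands in for WREG32() */
	}

	int main(void)
	{
	    uint32_t slot = 0;
	    struct fence_drv drv = { .cpu_addr = &slot, .scratch_reg = 0 };
	    fence_write(&drv, 1, 42);
	    printf("fence value: %u\n", slot); /* prints 42 */
	    return 0;
	}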
@@ -54 +70 @@
 
-static u32 radeon_fence_read(struct radeon_device *rdev)
-{
-	u32 seq;
-
-	if (rdev->wb.enabled) {
-		u32 scratch_index;
-		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
-	} else
-		seq = RREG32(rdev->fence_drv.scratch_reg);
-	return seq;
-}
+/**
+ * radeon_fence_read - read a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Reads a fence value from memory or a scratch register (all asics).
+ * Returns the value of the fence read from memory or register.
+ */
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
+{
+	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+	u32 seq = 0;
+
+	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+		seq = le32_to_cpu(*drv->cpu_addr);
+	} else {
+		seq = RREG32(drv->scratch_reg);
+	}
+	return seq;
+}
@@ -71 +92 @@
 
-int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
-{
-	unsigned long irq_flags;
-
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (fence->emited) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-		return 0;
-	}
-	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
-	if (!rdev->cp.ready)
-		/* FIXME: cp is not running assume everythings is done right
-		 * away
-		 */
-		radeon_fence_write(rdev, fence->seq);
-	else
-		radeon_fence_ring_emit(rdev, fence);
-
-//   trace_radeon_fence_emit(rdev->ddev, fence->seq);
-	fence->emited = true;
-	list_move_tail(&fence->list, &rdev->fence_drv.emited);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-	return 0;
-}
+/**
+ * radeon_fence_emit - emit a fence on the requested ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ * @ring: ring index the fence is associated with
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_fence_emit(struct radeon_device *rdev,
+		      struct radeon_fence **fence,
+		      int ring)
+{
+	/* we are protected by the ring emission mutex */
+	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
+	if ((*fence) == NULL) {
+		return -ENOMEM;
+	}
+	kref_init(&((*fence)->kref));
+	(*fence)->rdev = rdev;
+	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
+	(*fence)->ring = ring;
+	radeon_fence_ring_emit(rdev, ring, *fence);
+//   trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
+	return 0;
+}
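In the Rev 2997 emit path each ring owns its own monotonically increasing 64-bit sequence counter, incremented under the ring emission mutex; the fence object just records the ring index and the sequence it was assigned. A toy model of that allocation, with invented names standing in for the driver's structures:

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_RINGS 3 /* RADEON_NUM_RINGS covers more rings on real hardware */

	/* sync_seq[own ring] doubles as "last sequence emitted on this ring" */
	static uint64_t sync_seq[NUM_RINGS];

	struct toy_fence { uint64_t seq; int ring; };

	/* caller is assumed to hold the per-ring emission lock */
	static void toy_emit(struct toy_fence *f, int ring)
	{
	    f->seq  = ++sync_seq[ring];
	    f->ring = ring;
	}

	int main(void)
	{
	    struct toy_fence a, b, c;
	    toy_emit(&a, 0);
	    toy_emit(&b, 0);
	    toy_emit(&c, 1);
	    printf("%llu %llu %llu\n",
	           (unsigned long long)a.seq,   /* 1 */
	           (unsigned long long)b.seq,   /* 2 */
	           (unsigned long long)c.seq);  /* 1: rings count independently */
	    return 0;
	}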
@@ -95 +120 @@
 
-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
-{
-	struct radeon_fence *fence;
-	struct list_head *i, *n;
-	uint32_t seq;
-	bool wake = false;
-	unsigned long cjiffies;
-
-	seq = radeon_fence_read(rdev);
-	if (seq != rdev->fence_drv.last_seq) {
-		rdev->fence_drv.last_seq = seq;
-        rdev->fence_drv.last_jiffies = GetTimerTicks();
-        rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-	} else {
-        cjiffies = GetTimerTicks();
-		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
-			cjiffies -= rdev->fence_drv.last_jiffies;
-			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
-				/* update the timeout */
-				rdev->fence_drv.last_timeout -= cjiffies;
-			} else {
-				/* the 500ms timeout is elapsed we should test
-				 * for GPU lockup
-				 */
-				rdev->fence_drv.last_timeout = 1;
-			}
-		} else {
-			/* wrap around update last jiffies, we will just wait
-			 * a little longer
-			 */
-			rdev->fence_drv.last_jiffies = cjiffies;
-		}
-		return false;
-	}
-	n = NULL;
-	list_for_each(i, &rdev->fence_drv.emited) {
-		fence = list_entry(i, struct radeon_fence, list);
-		if (fence->seq == seq) {
-			n = i;
-			break;
-		}
-	}
-	/* all fence previous to this one are considered as signaled */
-	if (n) {
-        kevent_t event;
-        event.code = -1;
-		i = n;
-		do {
-			n = i->prev;
-			list_move_tail(i, &rdev->fence_drv.signaled);
-			fence = list_entry(i, struct radeon_fence, list);
-			fence->signaled = true;
-//            dbgprintf("fence %x done\n", fence);
-            RaiseEvent(fence->evnt, 0, &event);
-			i = n;
-		} while (i != &rdev->fence_drv.emited);
-		wake = true;
-	}
-	return wake;
-}
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
+void radeon_fence_process(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq, last_seq, last_emitted;
+	unsigned count_loop = 0;
+	bool wake = false;
+
+	/* Note there is a scenario here for an infinite loop but it's
+	 * very unlikely to happen. For it to happen, the current polling
+	 * process needs to be interrupted by another process, and that other
+	 * process needs to update the last_seq between the atomic read and
+	 * xchg of the current process.
+	 *
+	 * Moreover, for this to turn into an infinite loop there needs to be
+	 * a continuous stream of newly signaled fences, ie radeon_fence_read needs
+	 * to return a different value each time for both the currently
+	 * polling process and the other process that xchg the last_seq
+	 * between the atomic read and xchg of the current process. And the
+	 * value the other process set as last seq must be higher than
+	 * the seq value we just read. Which means that the current process
+	 * needs to be interrupted after radeon_fence_read and before
+	 * the atomic xchg.
+	 *
+	 * To be even more safe we count the number of times we loop and
+	 * we bail after 10 loops, just accepting the fact that we might
+	 * have temporarily set the last_seq not to the true real last
+	 * seq but to an older one.
+	 */
+	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+	do {
+		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
+		seq = radeon_fence_read(rdev, ring);
+		seq |= last_seq & 0xffffffff00000000LL;
+		if (seq < last_seq) {
+			seq &= 0xffffffff;
+			seq |= last_emitted & 0xffffffff00000000LL;
+		}
+
+		if (seq <= last_seq || seq > last_emitted) {
+			break;
+		}
+		/* If we loop over we don't want to return without
+		 * checking if a fence is signaled, as it means that the
+		 * seq we just read is different from the previous one.
+		 */
+		wake = true;
+		last_seq = seq;
+		if ((count_loop++) > 10) {
+			/* We looped over too many times, leave with the
+			 * fact that we might have set an older fence
+			 * seq than the current real last seq as signaled
+			 * by the hw.
+			 */
+			break;
+		}
+	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+	if (wake) {
+		rdev->fence_drv[ring].last_activity = GetTimerTicks();
+		wake_up_all(&rdev->fence_queue);
+	}
+}
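The hardware only stores the low 32 bits of the sequence, so radeon_fence_process splices the 32-bit value it reads onto the upper half of the last known 64-bit sequence, and borrows the upper half of the newest emitted sequence when the low word has wrapped. The arithmetic in isolation, as a sketch under those assumptions:

	#include <stdint.h>
	#include <stdio.h>

	/* extend a 32-bit hardware sequence to 64 bits, as in radeon_fence_process */
	static uint64_t extend_seq(uint32_t hw, uint64_t last_seq, uint64_t last_emitted)
	{
	    uint64_t seq = (uint64_t)hw | (last_seq & 0xffffffff00000000ULL);
	    if (seq < last_seq) {
	        /* low word wrapped: take the upper half of the newest emitted seq */
	        seq &= 0xffffffff;
	        seq |= last_emitted & 0xffffffff00000000ULL;
	    }
	    return seq;
	}

	int main(void)
	{
	    /* no wrap: hw reads back 5 while last_seq is 0x1_00000003 */
	    printf("%llx\n", (unsigned long long)
	           extend_seq(0x00000005, 0x100000003ULL, 0x100000007ULL)); /* 100000005 */
	    /* wrap: hw counter restarted near zero, emitted side already at 0x2_... */
	    printf("%llx\n", (unsigned long long)
	           extend_seq(0x00000002, 0x1fffffff0ULL, 0x200000004ULL)); /* 200000002 */
	    return 0;
	}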
@@ -156 +191 @@
 
-
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
-{
-	unsigned long irq_flags;
-
-	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
-	if ((*fence) == NULL) {
-		return -ENOMEM;
-	}
-
-    (*fence)->evnt = CreateEvent(NULL, MANUAL_DESTROY);
-//	kref_init(&((*fence)->kref));
-	(*fence)->rdev = rdev;
-	(*fence)->emited = false;
-	(*fence)->signaled = false;
-	(*fence)->seq = 0;
-	INIT_LIST_HEAD(&(*fence)->list);
-
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-	return 0;
-}
+/**
+ * radeon_fence_destroy - destroy a fence
+ *
+ * @kref: fence kref
+ *
+ * Frees the fence object (all asics).
+ */
+static void radeon_fence_destroy(struct kref *kref)
+{
+	struct radeon_fence *fence;
+
+	fence = container_of(kref, struct radeon_fence, kref);
+	kfree(fence);
+}
+
+/**
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
+ *
+ * @rdev: radeon device pointer
+ * @seq: sequence number
+ * @ring: ring index the fence is associated with
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if the fence has signaled (current fence value
+ * is >= requested value) or false if it has not (current fence
+ * value is < the requested value).  Helper function for
+ * radeon_fence_signaled().
+ */
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+				      u64 seq, unsigned ring)
+{
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		return true;
+	}
+	/* poll new last sequence at least once */
+	radeon_fence_process(rdev, ring);
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		return true;
+	}
+	return false;
+}
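radeon_fence_seq_signaled is deliberately two-step: the cheap atomic comparison is tried first, and only if it fails does it poll the hardware once before re-checking. A sketch of the same fast-path/slow-path shape, with check_hw() as an invented stand-in for radeon_fence_process():

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static _Atomic uint64_t last_seq;

	static void check_hw(void)
	{
	    /* stands in for polling the GPU and publishing the newest value */
	    atomic_store(&last_seq, 8);
	}

	static bool seq_signaled(uint64_t seq)
	{
	    if (atomic_load(&last_seq) >= seq)
	        return true;       /* fast path: no hardware access */
	    check_hw();            /* slow path: poll once, then retry */
	    return atomic_load(&last_seq) >= seq;
	}

	int main(void)
	{
	    atomic_store(&last_seq, 3);
	    printf("%d\n", seq_signaled(5)); /* 1: found after one poll */
	    return 0;
	}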
@@ -180 +234 @@
 
 
-bool radeon_fence_signaled(struct radeon_fence *fence)
-{
-	unsigned long irq_flags;
-	bool signaled = false;
-
-	if (!fence)
-		return true;
-
-	if (fence->rdev->gpu_lockup)
-		return true;
-
-	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
-	signaled = fence->signaled;
-	/* if we are shuting down report all fence as signaled */
-	if (fence->rdev->shutdown) {
-		signaled = true;
-	}
-	if (!fence->emited) {
-		WARN(1, "Querying an unemited fence : %p !\n", fence);
-		signaled = true;
-	}
-	if (!signaled) {
-		radeon_fence_poll_locked(fence->rdev);
-		signaled = fence->signaled;
-	}
-	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
-	return signaled;
-}
+/**
+ * radeon_fence_signaled - check if a fence has signaled
+ *
+ * @fence: radeon fence object
+ *
+ * Check if the requested fence has signaled (all asics).
+ * Returns true if the fence has signaled or false if it has not.
+ */
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+	if (!fence) {
+		return true;
+	}
+	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+		return true;
+	}
+	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+		return true;
+	}
+	return false;
+}
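The new radeon_fence_signaled collapses a signaled fence's sequence to the sentinel RADEON_FENCE_SIGNALED_SEQ, so later queries short-circuit without touching the atomic counter or the hardware. A sketch of that caching pattern, with a stubbed hardware check and an invented sentinel value:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SIGNALED_SEQ 0ULL /* sentinel; the driver uses its own constant */

	struct toy_fence { uint64_t seq; };

	static bool hw_seq_signaled(uint64_t seq) { return seq <= 41; } /* stub */

	static bool toy_signaled(struct toy_fence *f)
	{
	    if (f->seq == SIGNALED_SEQ)
	        return true;              /* cached: no re-check needed */
	    if (hw_seq_signaled(f->seq)) {
	        f->seq = SIGNALED_SEQ;    /* cache the result in the fence */
	        return true;
	    }
	    return false;
	}

	int main(void)
	{
	    struct toy_fence f = { .seq = 40 };
	    printf("%d %d\n", toy_signaled(&f), f.seq == SIGNALED_SEQ); /* 1 1 */
	    return 0;
	}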
@@ -210 +257 @@
 
-int radeon_fence_wait(struct radeon_fence *fence, bool intr)
-{
-	struct radeon_device *rdev;
-	unsigned long irq_flags, timeout;
-	u32 seq;
-	int r;
-
-	if (fence == NULL) {
-		WARN(1, "Querying an invalid fence : %p !\n", fence);
-		return 0;
-	}
-	rdev = fence->rdev;
-	if (radeon_fence_signaled(fence)) {
-		return 0;
-	}
-	timeout = rdev->fence_drv.last_timeout;
-retry:
-	/* save current sequence used to check for GPU lockup */
-	seq = rdev->fence_drv.last_seq;
-//   trace_radeon_fence_wait_begin(rdev->ddev, seq);
-	if (intr) {
-		radeon_irq_kms_sw_irq_get(rdev);
-//       r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
-//               radeon_fence_signaled(fence), timeout);
-
-        WaitEvent(fence->evnt);
-
-		radeon_irq_kms_sw_irq_put(rdev);
-		if (unlikely(r < 0)) {
-			return r;
-		}
-	} else {
-		radeon_irq_kms_sw_irq_get(rdev);
-//       r = wait_event_timeout(rdev->fence_drv.queue,
-//            radeon_fence_signaled(fence), timeout);
-
-        WaitEvent(fence->evnt);
-
-		radeon_irq_kms_sw_irq_put(rdev);
-	}
-//   trace_radeon_fence_wait_end(rdev->ddev, seq);
-	if (unlikely(!radeon_fence_signaled(fence))) {
-		/* we were interrupted for some reason and fence isn't
-		 * isn't signaled yet, resume wait
-		 */
-		if (r) {
-			timeout = r;
-			goto retry;
-		}
-		/* don't protect read access to rdev->fence_drv.last_seq
-		 * if we experiencing a lockup the value doesn't change
-		 */
-		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
-			/* good news we believe it's a lockup */
-			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
-			     fence->seq, seq);
-			/* FIXME: what should we do ? marking everyone
-			 * as signaled for now
-			 */
-			rdev->gpu_lockup = true;
-//           r = radeon_gpu_reset(rdev);
-//           if (r)
-//               return r;
-            return true;
-
-//           radeon_fence_write(rdev, fence->seq);
-//           rdev->gpu_lockup = false;
-		}
-        timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-        rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-        rdev->fence_drv.last_jiffies = GetTimerTicks();
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-		goto retry;
-	}
-	return 0;
-}
+/**
+ * radeon_fence_wait_seq - wait for a specific sequence number
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number we want to wait for
+ * @ring: ring index the fence is associated with
+ * @intr: use interruptible sleep
+ * @lock_ring: whether the ring should be locked or not
+ *
+ * Wait for the requested sequence number to be written (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number.  Helper function
+ * for radeon_fence_wait(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected and the ring is
+ * marked as not ready so no further jobs get scheduled until a successful
+ * reset.
+ */
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+				 unsigned ring, bool intr, bool lock_ring)
+{
+	unsigned long timeout, last_activity;
+	uint64_t seq;
+	unsigned i;
+	bool signaled;
+	int r;
+
+	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+		if (!rdev->ring[ring].ready) {
+			return -EBUSY;
+		}
+
+		timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
+		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
+			/* the normal case, timeout is somewhere before last_activity */
+			timeout = rdev->fence_drv[ring].last_activity - timeout;
+		} else {
+			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+			 * anyway we will just wait for the minimum amount and then check for a lockup
+			 */
+			timeout = 1;
+		}
+		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+		/* Save current last activity value, used to check for GPU lockups */
+		last_activity = rdev->fence_drv[ring].last_activity;
+
+//		trace_radeon_fence_wait_begin(rdev->ddev, seq);
+		radeon_irq_kms_sw_irq_get(rdev, ring);
+//       if (intr) {
+//           r = wait_event_interruptible_timeout(rdev->fence_queue,
+//               (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+//               timeout);
+//       } else {
+//           r = wait_event_timeout(rdev->fence_queue,
+//               (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+//               timeout);
+//       }
+        delay(1);
+
+		radeon_irq_kms_sw_irq_put(rdev, ring);
+//       if (unlikely(r < 0)) {
+//           return r;
+//       }
+//		trace_radeon_fence_wait_end(rdev->ddev, seq);
+
+		if (unlikely(!signaled)) {
+			/* we were interrupted for some reason and fence
+			 * isn't signaled yet, resume waiting */
+			if (r) {
+				continue;
+			}
+
+			/* check if sequence value has changed since last_activity */
+			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+				continue;
+			}
+
+			if (lock_ring) {
+				mutex_lock(&rdev->ring_lock);
+			}
+
+			/* test if somebody else has already decided that this is a lockup */
+			if (last_activity != rdev->fence_drv[ring].last_activity) {
+				if (lock_ring) {
+					mutex_unlock(&rdev->ring_lock);
+				}
+				continue;
+			}
+
+			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+				/* good news we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
+					 target_seq, seq);
+
+				/* change last activity so nobody else thinks there is a lockup */
+				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+					rdev->fence_drv[i].last_activity = jiffies;
+				}
+
+				/* mark the ring as not ready any more */
+				rdev->ring[ring].ready = false;
+				if (lock_ring) {
+					mutex_unlock(&rdev->ring_lock);
+				}
+				return -EDEADLK;
+			}
+
+			if (lock_ring) {
+				mutex_unlock(&rdev->ring_lock);
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_wait - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+{
+	int r;
+
+	if (fence == NULL) {
+		WARN(1, "Querying an invalid fence : %p !\n", fence);
+		return -EINVAL;
+	}
+
+	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+				  fence->ring, intr, true);
+	if (r) {
+		return r;
+	}
+	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+	return 0;
+}
@@ -288 +401 @@
 
-#if 0
-int radeon_fence_wait_next(struct radeon_device *rdev)
-{
-	unsigned long irq_flags;
-	struct radeon_fence *fence;
-	int r;
-
-	if (rdev->gpu_lockup) {
-		return 0;
-	}
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emited)) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-		return 0;
-	}
-	fence = list_entry(rdev->fence_drv.emited.next,
-			   struct radeon_fence, list);
-	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-	r = radeon_fence_wait(fence, false);
-	radeon_fence_unref(&fence);
-	return r;
-}
-
-int radeon_fence_wait_last(struct radeon_device *rdev)
-{
-	unsigned long irq_flags;
-	struct radeon_fence *fence;
-	int r;
-
-	if (rdev->gpu_lockup) {
-		return 0;
-	}
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emited)) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-		return 0;
-	}
-	fence = list_entry(rdev->fence_drv.emited.prev,
-			   struct radeon_fence, list);
-	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-	r = radeon_fence_wait(fence, false);
-	radeon_fence_unref(&fence);
-	return r;
-}
-
-struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
-{
-	kref_get(&fence->kref);
-	return fence;
-}
-
-#endif
+static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
+{
+	unsigned i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * radeon_fence_wait_any_seq - wait for a sequence number on any ring
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics).  Sequence number array is indexed by ring id.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number.  Helper function
+ * for radeon_fence_wait_any(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ */
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+				     u64 *target_seq, bool intr)
+{
+	unsigned long timeout, last_activity, tmp;
+	unsigned i, ring = RADEON_NUM_RINGS;
+	bool signaled;
+	int r;
+
+	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!target_seq[i]) {
+			continue;
+		}
+
+		/* use the most recent one as indicator */
+		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+			last_activity = rdev->fence_drv[i].last_activity;
+		}
+
+		/* For lockup detection just pick the lowest ring we are
+		 * actively waiting for
+		 */
+		if (i < ring) {
+			ring = i;
+		}
+	}
+
+	/* nothing to wait for ? */
+	if (ring == RADEON_NUM_RINGS) {
+		return -ENOENT;
+	}
+
+	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+		timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
+		if (time_after(last_activity, timeout)) {
+			/* the normal case, timeout is somewhere before last_activity */
+			timeout = last_activity - timeout;
+		} else {
+			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
+			 * anyway we will just wait for the minimum amount and then check for a lockup
+			 */
+			timeout = 1;
+		}
+
+//		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_get(rdev, i);
+			}
+		}
+
+//        WaitEvent(fence->evnt);
+
+		r = 1;
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			if (target_seq[i]) {
+				radeon_irq_kms_sw_irq_put(rdev, i);
+			}
+		}
+		if (unlikely(r < 0)) {
+			return r;
+		}
+//   trace_radeon_fence_wait_end(rdev->ddev, seq);
+
+		if (unlikely(!signaled)) {
+			/* we were interrupted for some reason and fence
+			 * isn't signaled yet, resume waiting */
+			if (r) {
+				continue;
+			}
+
+			mutex_lock(&rdev->ring_lock);
+			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+					tmp = rdev->fence_drv[i].last_activity;
+				}
+			}
+
+			/* test if somebody else has already decided that this is a lockup */
+			if (last_activity != tmp) {
+				last_activity = tmp;
+				mutex_unlock(&rdev->ring_lock);
+				continue;
+			}
+
+			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+				/* good news we believe it's a lockup */
+				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+					 target_seq[ring]);
+
+				/* change last activity so nobody else thinks there is a lockup */
+				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+					rdev->fence_drv[i].last_activity = GetTimerTicks();
+				}
+
+				/* mark the ring as not ready any more */
+				rdev->ring[ring].ready = false;
+				mutex_unlock(&rdev->ring_lock);
+				return -EDEADLK;
+			}
+			mutex_unlock(&rdev->ring_lock);
+		}
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_wait_any - wait for a fence to signal on any ring
+ *
+ * @rdev: radeon device pointer
+ * @fences: radeon fence object(s)
+ * @intr: use interruptible sleep
+ *
+ * Wait for any requested fence to signal (all asics).  Fence
+ * array is indexed by ring id.  @intr selects whether to use
+ * interruptible (true) or non-interruptible (false) sleep when
+ * waiting for the fences. Used by the suballocator.
+ * Returns 0 if any fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_any(struct radeon_device *rdev,
+			  struct radeon_fence **fences,
+			  bool intr)
+{
+	uint64_t seq[RADEON_NUM_RINGS];
+	unsigned i;
+	int r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		seq[i] = 0;
+
+		if (!fences[i]) {
+			continue;
+		}
+
+		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+			/* something was already signaled */
+			return 0;
+		}
+
+		seq[i] = fences[i]->seq;
+	}
+
+	r = radeon_fence_wait_any_seq(rdev, seq, intr);
+	if (r) {
+		return r;
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_wait_next_locked - wait for the next fence to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for the next fence on the requested ring to signal (all asics).
+ * Returns 0 if the next fence has passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq;
+
+	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
+		/* nothing to wait for, last_seq is
+		   already the last emitted fence */
+		return -ENOENT;
+	}
+	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+}
+
+/**
+ * radeon_fence_wait_empty_locked - wait for all fences to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for all fences on the requested ring to signal (all asics).
+ * Returns 0 if the fences have passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+{
+	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+
+	while(1) {
+		int r;
+		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+		if (r == -EDEADLK) {
+			mutex_unlock(&rdev->ring_lock);
+			r = radeon_gpu_reset(rdev);
+			mutex_lock(&rdev->ring_lock);
+			if (!r)
+				continue;
+		}
+		if (r) {
+			dev_err(rdev->dev, "error waiting for ring to become"
+				" idle (%d)\n", r);
+		}
+		return;
+	}
+}
+
+/**
+ * radeon_fence_ref - take a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Take a reference on a fence (all asics).
+ * Returns the fence.
+ */
+struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
+{
+	kref_get(&fence->kref);
+	return fence;
+}
@@ -343 +644 @@
 
-void radeon_fence_unref(struct radeon_fence **fence)
-{
-    unsigned long irq_flags;
-	struct radeon_fence *tmp = *fence;
-
-	*fence = NULL;
-
-    if(tmp)
-    {
-        write_lock_irqsave(&tmp->rdev->fence_drv.lock, irq_flags);
-        list_del(&tmp->list);
-        tmp->emited = false;
-        write_unlock_irqrestore(&tmp->rdev->fence_drv.lock, irq_flags);
-    };
-}
-
-void radeon_fence_process(struct radeon_device *rdev)
-{
-	unsigned long irq_flags;
-	bool wake;
-
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	wake = radeon_fence_poll_locked(rdev);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-}
+/**
+ * radeon_fence_unref - remove a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Remove a reference on a fence (all asics).
+ */
+void radeon_fence_unref(struct radeon_fence **fence)
+{
+	struct radeon_fence *tmp = *fence;
+
+	*fence = NULL;
+	if (tmp) {
+		kref_put(&tmp->kref, radeon_fence_destroy);
+	}
+}
+
+/**
+ * radeon_fence_count_emitted - get the count of emitted fences
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Get the number of fences emitted on the requested ring (all asics).
+ * Returns the number of emitted fences on the ring.  Used by the
+ * dynpm code to track ring activity.
+ */
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+	uint64_t emitted;
+
+	/* We are not protected by ring lock when reading the last sequence
+	 * but it's ok to report slightly wrong fence count here.
+	 */
+	radeon_fence_process(rdev, ring);
+	emitted = rdev->fence_drv[ring].sync_seq[ring]
+		- atomic64_read(&rdev->fence_drv[ring].last_seq);
+	/* to avoid 32bit wrap around */
+	if (emitted > 0x10000000) {
+		emitted = 0x10000000;
+	}
+	return (unsigned)emitted;
+}
+
+/**
+ * radeon_fence_need_sync - do we need a semaphore
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Check if the fence needs to be synced against another ring
+ * (all asics).  If so, we need to emit a semaphore.
+ * Returns true if we need to sync with another ring, false if
+ * not.
+ */
+bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
+{
+	struct radeon_fence_driver *fdrv;
+
+	if (!fence) {
+		return false;
+	}
+
+	if (fence->ring == dst_ring) {
+		return false;
+	}
+
+	/* we are protected by the ring mutex */
+	fdrv = &fence->rdev->fence_drv[dst_ring];
+	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * radeon_fence_note_sync - record the sync point
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Note the sequence number at which point the fence will
+ * be synced with the requested ring (all asics).
+ */
+void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
+{
+	struct radeon_fence_driver *dst, *src;
+	unsigned i;
+
+	if (!fence) {
+		return;
+	}
+
+	if (fence->ring == dst_ring) {
+		return;
+	}
+
+	/* we are protected by the ring mutex */
+	src = &fence->rdev->fence_drv[fence->ring];
+	dst = &fence->rdev->fence_drv[dst_ring];
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (i == dst_ring) {
+			continue;
+		}
+		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
+	}
+}
+
+/**
+ * radeon_fence_driver_start_ring - make the fence driver
+ * ready for use on the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Make the fence driver ready for processing (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has.
+ * Returns 0 for success, errors for failure.
+ */
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
+{
+	uint64_t index;
+	int r;
+
+	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+	if (rdev->wb.use_event) {
+		rdev->fence_drv[ring].scratch_reg = 0;
+		index = R600_WB_EVENT_OFFSET + ring * 4;
+	} else {
+		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+		if (r) {
+			dev_err(rdev->dev, "fence failed to get scratch register\n");
+			return r;
+		}
+		index = RADEON_WB_SCRATCH_OFFSET +
+			rdev->fence_drv[ring].scratch_reg -
+			rdev->scratch.reg_base;
+	}
+	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
+	rdev->fence_drv[ring].initialized = true;
+	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
+		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+	return 0;
+}
+
+/**
+ * radeon_fence_driver_init_ring - init the fence driver
+ * for the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Init the fence driver for the requested ring (all asics).
+ * Helper function for radeon_fence_driver_init().
+ */
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+	int i;
+
+	rdev->fence_drv[ring].scratch_reg = -1;
+	rdev->fence_drv[ring].cpu_addr = NULL;
+	rdev->fence_drv[ring].gpu_addr = 0;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		rdev->fence_drv[ring].sync_seq[i] = 0;
+	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+	rdev->fence_drv[ring].last_activity = jiffies;
+	rdev->fence_drv[ring].initialized = false;
+}
@@ -369 +817 @@
 
+/**
+ * radeon_fence_driver_init - init the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Init the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * radeon_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
 int radeon_fence_driver_init(struct radeon_device *rdev)
 {
-	unsigned long irq_flags;
-	int r;
-
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
-	if (r) {
-		dev_err(rdev->dev, "fence failed to get scratch register\n");
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-		return r;
-	}
-	radeon_fence_write(rdev, 0);
-	atomic_set(&rdev->fence_drv.seq, 0);
-	INIT_LIST_HEAD(&rdev->fence_drv.created);
+	int ring;
+
+	init_waitqueue_head(&rdev->fence_queue);
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		radeon_fence_driver_init_ring(rdev, ring);
+	}
+	if (radeon_debugfs_fence_init(rdev)) {
+		dev_err(rdev->dev, "fence debugfs file creation failed\n");
+	}
+	return 0;
+}
+
+/**
+ * radeon_fence_driver_fini - tear down the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tear down the fence driver for all possible rings (all asics).
+ */
+void radeon_fence_driver_fini(struct radeon_device *rdev)
+{
+	int ring;
+
+	mutex_lock(&rdev->ring_lock);
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		radeon_fence_wait_empty_locked(rdev, ring);
+		wake_up_all(&rdev->fence_queue);
+		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+		rdev->fence_drv[ring].initialized = false;
+	}
+	mutex_unlock(&rdev->ring_lock);
+}
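radeon_fence_note_sync propagates synchronization knowledge between rings: after a ring has synced against a fence from another ring, its view of every ring's sequence is raised to at least the source ring's view, so redundant semaphores can be skipped later. A small sketch of that element-wise max merge, with a toy copy of the per-ring sync_seq arrays:

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_RINGS 3

	/* sync_seq[r][i]: newest fence of ring i that ring r is known to be
	 * ordered after (toy stand-in for the fence_drv sync_seq arrays) */
	static uint64_t sync_seq[NUM_RINGS][NUM_RINGS];

	static void note_sync(int src, int dst)
	{
	    for (int i = 0; i < NUM_RINGS; i++) {
	        if (i == dst)
	            continue;
	        if (sync_seq[src][i] > sync_seq[dst][i])
	            sync_seq[dst][i] = sync_seq[src][i]; /* element-wise max */
	    }
	}

	int main(void)
	{
	    sync_seq[0][0] = 7; sync_seq[0][2] = 4;
	    sync_seq[1][0] = 2; sync_seq[1][2] = 9;
	    note_sync(0, 1); /* ring 1 just synced against a ring-0 fence */
	    printf("%llu %llu\n",
	           (unsigned long long)sync_seq[1][0],  /* 7 */
	           (unsigned long long)sync_seq[1][2]); /* 9 */
	    return 0;
	}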