Subversion Repositories Kolibri OS


--- Rev 5078
+++ Rev 6296
@@ -1 +1 @@
 /**************************************************************************
  *
- * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
@@ -44 +44 @@
 	uint32_t pending_actions[VMW_ACTION_MAX];
 	struct mutex goal_irq_mutex;
 	bool goal_irq_on; /* Protected by @goal_irq_mutex */
 	bool seqno_valid; /* Protected by @lock, and may not be set to true
 			     without the @goal_irq_mutex held. */
+	unsigned ctx;
 };
@@ -50 +51 @@
 
 struct vmw_user_fence {
 	struct ttm_base_object base;
@@ -78 +79 @@
 
 	uint32_t *tv_sec;
 	uint32_t *tv_usec;
-};
+};
+
+static struct vmw_fence_manager *
+fman_from_fence(struct vmw_fence_obj *fence)
+{
+	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
+}
 
 /**
  * Note on fencing subsystem usage of irqs:
  * Typically the vmw_fences_update function is called
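Note: the new revision drops the per-fence fman back-pointer; fman_from_fence() recovers the manager from fence->base.lock, which works because vmw_fence_obj_init() (later in this diff) passes &fman->lock to fence_init(). A standalone sketch of that container_of pattern, with illustrative names rather than the driver's:

    #include <stddef.h>

    /* Minimal container_of: recover the enclosing struct from a pointer
     * to one of its members (the kernel macro adds type checking). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct manager {
        int lock;                   /* stands in for the shared spinlock */
        int num_fence_objects;
    };

    /* Hypothetical helper mirroring fman_from_fence(): each fence stores
     * only a pointer to the manager's lock, yet the manager stays reachable. */
    static struct manager *manager_from_lock(int *lock)
    {
        return container_of(lock, struct manager, lock);
    }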
@@ -100 +107 @@
  *
  * The fence goal seqno irq is on as long as there are unsignaled fence
  * objects with actions attached to them.
  */
@@ -104 +111 @@
 
-static void vmw_fence_obj_destroy_locked(struct kref *kref)
+static void vmw_fence_obj_destroy(struct fence *f)
 {
 	struct vmw_fence_obj *fence =
-		container_of(kref, struct vmw_fence_obj, kref);
+		container_of(f, struct vmw_fence_obj, base);
 
-	struct vmw_fence_manager *fman = fence->fman;
-	unsigned int num_fences;
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+	unsigned long irq_flags;
 
+	spin_lock_irqsave(&fman->lock, irq_flags);
 	list_del_init(&fence->head);
-	num_fences = --fman->num_fence_objects;
-	spin_unlock_irq(&fman->lock);
-	if (fence->destroy)
-		fence->destroy(fence);
-	else
-		kfree(fence);
-
-	spin_lock_irq(&fman->lock);
+	--fman->num_fence_objects;
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	fence->destroy(fence);
 }
+
+static const char *vmw_fence_get_driver_name(struct fence *f)
+{
+	return "vmwgfx";
+}
+
+static const char *vmw_fence_get_timeline_name(struct fence *f)
+{
+	return "svga";
+}
+
+static bool vmw_fence_enable_signaling(struct fence *f)
+{
+	struct vmw_fence_obj *fence =
+		container_of(f, struct vmw_fence_obj, base);
+
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+	struct vmw_private *dev_priv = fman->dev_priv;
+
+	u32 *fifo_mem = dev_priv->mmio_virt;
+	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
+	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
+		return false;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+
+	return true;
+}
+
+struct vmwgfx_wait_cb {
+	struct fence_cb base;
+	struct task_struct *task;
+};
+
+static void
+vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
+{
+	struct vmwgfx_wait_cb *wait =
+		container_of(cb, struct vmwgfx_wait_cb, base);
+
+//   wake_up_process(wait->task);
+}
+
+static void __vmw_fences_update(struct vmw_fence_manager *fman);
+
+static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
+{
+	struct vmw_fence_obj *fence =
+		container_of(f, struct vmw_fence_obj, base);
+
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+	struct vmw_private *dev_priv = fman->dev_priv;
+	struct vmwgfx_wait_cb cb;
+	long ret = timeout;
+	unsigned long irq_flags;
+
+	if (likely(vmw_fence_obj_signaled(fence)))
+		return timeout;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	vmw_seqno_waiter_add(dev_priv);
+
+	spin_lock_irqsave(f->lock, irq_flags);
+
+//   if (intr && signal_pending(current)) {
+//       ret = -ERESTARTSYS;
+//       goto out;
+//   }
+
+	cb.base.func = vmwgfx_wait_cb;
+	cb.task = current;
+	list_add(&cb.base.node, &f->cb_list);
+
+	while (ret > 0) {
+		__vmw_fences_update(fman);
+		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
+			break;
+
+		spin_unlock_irqrestore(f->lock, irq_flags);
+
+//		ret = schedule_timeout(ret);
+		delay(1);
+		ret = 0;
+		spin_lock_irqsave(f->lock, irq_flags);
+//       if (ret > 0 && intr && signal_pending(current))
+//           ret = -ERESTARTSYS;
+	}
+
+	if (!list_empty(&cb.base.node))
+		list_del(&cb.base.node);
+
+out:
+	spin_unlock_irqrestore(f->lock, irq_flags);
+
+	vmw_seqno_waiter_remove(dev_priv);
+
+	return ret;
+}
+
+static struct fence_ops vmw_fence_ops = {
+	.get_driver_name = vmw_fence_get_driver_name,
+	.get_timeline_name = vmw_fence_get_timeline_name,
+	.enable_signaling = vmw_fence_enable_signaling,
+	.wait = vmw_fence_wait,
+	.release = vmw_fence_obj_destroy,
+};
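Note: this hunk retires the driver-private kref/waitqueue fence in favour of the kernel fence framework (struct fence from <linux/fence.h>, renamed dma_fence in kernels 4.10+); in the KolibriOS port the scheduler hooks are stubbed out, which is why wake_up_process() stays commented and vmw_fence_wait() polls with delay(1). A minimal sketch of what such a conversion supplies, assuming the pre-4.10 API; all demo_* names are invented:

    #include <linux/fence.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);  /* shared lock handed to fence_init() */

    static const char *demo_get_driver_name(struct fence *f)
    {
        return "demo";
    }

    static const char *demo_get_timeline_name(struct fence *f)
    {
        return "demo-timeline";
    }

    /* Called once, under f->lock, when the first waiter shows up;
     * returning true promises a later fence_signal() from irq/update code. */
    static bool demo_enable_signaling(struct fence *f)
    {
        return true;
    }

    static struct fence_ops demo_ops = {
        .get_driver_name   = demo_get_driver_name,
        .get_timeline_name = demo_get_timeline_name,
        .enable_signaling  = demo_enable_signaling,
        .wait              = fence_default_wait, /* vmwgfx rolls its own */
    };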
@@ -184 +293 @@
 	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
 	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
 	fman->event_fence_action_size =
 		ttm_round_pot(sizeof(struct vmw_event_fence_action));
 	mutex_init(&fman->goal_irq_mutex);
+	fman->ctx = fence_context_alloc(1);
 
 	return fman;
 }
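Note: fence_context_alloc(1) reserves one timeline ("context") number for this manager; fences that share a context are expected to signal in seqno order, and both values are handed to fence_init() in the vmw_fence_obj_init() hunk below. A hedged sketch of the ordering test this enables (my helper, not a framework call):

    #include <stdint.h>

    /* Fences on one context form a single timeline; within it, seqnos
     * are compared with wrap-safe unsigned arithmetic. */
    struct demo_fence {
        uint64_t context;
        uint32_t seqno;
    };

    static int demo_is_later(const struct demo_fence *a,
                             const struct demo_fence *b)
    {
        return a->context == b->context &&
               a->seqno - b->seqno < (1u << 31);
    }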
@@ -205 +315 @@
 	BUG_ON(!lists_empty);
 	kfree(fman);
 }
 
 static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
-			      struct vmw_fence_obj *fence,
-			      u32 seqno,
-			      uint32_t mask,
+			      struct vmw_fence_obj *fence, u32 seqno,
 			      void (*destroy) (struct vmw_fence_obj *fence))
 {
 	unsigned long irq_flags;
-	unsigned int num_fences;
 	int ret = 0;
 
-	fence->seqno = seqno;
-	INIT_LIST_HEAD(&fence->seq_passed_actions);
-	fence->fman = fman;
-	fence->signaled = 0;
-	fence->signal_mask = mask;
-	kref_init(&fence->kref);
+	fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
+		   fman->ctx, seqno);
+	INIT_LIST_HEAD(&fence->seq_passed_actions);
 	fence->destroy = destroy;
-	init_waitqueue_head(&fence->queue);
 
 	spin_lock_irqsave(&fman->lock, irq_flags);
 	if (unlikely(fman->fifo_down)) {
 		ret = -EBUSY;
 		goto out_unlock;
 	}
 	list_add_tail(&fence->head, &fman->fence_list);
-	num_fences = ++fman->num_fence_objects;
+	++fman->num_fence_objects;
 
 out_unlock:
 	spin_unlock_irqrestore(&fman->lock, irq_flags);
 	return ret;
 
-}
-
-struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
-{
-	if (unlikely(fence == NULL))
-		return NULL;
-
-	kref_get(&fence->kref);
-	return fence;
-}
-
-/**
- * vmw_fence_obj_unreference
- *
- * Note that this function may not be entered with disabled irqs since
- * it may re-enable them in the destroy function.
- *
- */
-void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
-{
-	struct vmw_fence_obj *fence = *fence_p;
-	struct vmw_fence_manager *fman;
-
-	if (unlikely(fence == NULL))
-		return;
-
-	fman = fence->fman;
-	*fence_p = NULL;
-	spin_lock_irq(&fman->lock);
-	BUG_ON(atomic_read(&fence->kref.refcount) == 0);
-	kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
-	spin_unlock_irq(&fman->lock);
@@ -309 +380 @@
  */
 static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
 				      u32 passed_seqno)
 {
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 *fifo_mem;
 	struct vmw_fence_obj *fence;
 
 	if (likely(!fman->seqno_valid))
 		return false;
 
 	fifo_mem = fman->dev_priv->mmio_virt;
-	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
 	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
 		return false;
 
 	fman->seqno_valid = false;
 	list_for_each_entry(fence, &fman->fence_list, head) {
 		if (!list_empty(&fence->seq_passed_actions)) {
 			fman->seqno_valid = true;
-			iowrite32(fence->seqno,
-				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
+			vmw_mmio_write(fence->base.seqno,
+				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
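Note: both the goal handling here and the signal scan in __vmw_fences_update() rely on wrap-safe unsigned subtraction against VMW_FENCE_WRAP. A standalone demo of why that comparison survives 32-bit wraparound (the threshold value is assumed for the demo):

    #include <stdint.h>
    #include <stdio.h>

    #define VMW_FENCE_WRAP (1u << 31)   /* demo value, half the seqno range */

    /* "Has the hardware reached this fence?" stays correct across wrap
     * as long as the two seqnos are less than the threshold apart. */
    static int seqno_passed(uint32_t current_seqno, uint32_t fence_seqno)
    {
        return current_seqno - fence_seqno < VMW_FENCE_WRAP;
    }

    int main(void)
    {
        printf("%d\n", seqno_passed(5, 3));           /* 1: passed      */
        printf("%d\n", seqno_passed(3, 5));           /* 0: still ahead */
        printf("%d\n", seqno_passed(2, 0xfffffffeu)); /* 1: wrapped     */
        return 0;
    }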
@@ -351 +422 @@
  *
  * returns true if the device goal seqno was updated. False otherwise.
  */
 static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 {
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	u32 goal_seqno;
-	__le32 __iomem *fifo_mem;
+	u32 *fifo_mem;
 
-	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
+	if (fence_is_signaled_locked(&fence->base))
 		return false;
 
-	fifo_mem = fence->fman->dev_priv->mmio_virt;
-	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
-	if (likely(fence->fman->seqno_valid &&
-		   goal_seqno - fence->seqno < VMW_FENCE_WRAP))
+	fifo_mem = fman->dev_priv->mmio_virt;
+	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	if (likely(fman->seqno_valid &&
+		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
 		return false;
 
-	iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
-	fence->fman->seqno_valid = true;
+	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
+	fman->seqno_valid = true;
 
 	return true;
 }
 
-void vmw_fences_update(struct vmw_fence_manager *fman)
+static void __vmw_fences_update(struct vmw_fence_manager *fman)
 {
-	unsigned long flags;
 	struct vmw_fence_obj *fence, *next_fence;
 	struct list_head action_list;
 	bool needs_rerun;
 	uint32_t seqno, new_seqno;
-	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+	u32 *fifo_mem = fman->dev_priv->mmio_virt;
 
-	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 rerun:
-	spin_lock_irqsave(&fman->lock, flags);
 	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
-		if (seqno - fence->seqno < VMW_FENCE_WRAP) {
+		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
 			list_del_init(&fence->head);
-			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+			fence_signal_locked(&fence->base);
 			INIT_LIST_HEAD(&action_list);
 			list_splice_init(&fence->seq_passed_actions,
 					 &action_list);
 			vmw_fences_perform_actions(fman, &action_list);
-			wake_up_all(&fence->queue);
 		} else
 			break;
 	}
 
-	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
-
-//   if (!list_empty(&fman->cleanup_list))
-//       (void) schedule_work(&fman->work);
-
-	spin_unlock_irqrestore(&fman->lock, flags);
-
 	/*
 	 * Rerun if the fence goal seqno was updated, and the
 	 * hardware might have raced with that update, so that
 	 * we missed a fence_goal irq.
 	 */
 
+	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
 	if (unlikely(needs_rerun)) {
-		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
 		if (new_seqno != seqno) {
 			seqno = new_seqno;
 			goto rerun;
 		}
 	}
+
 }
 
+void vmw_fences_update(struct vmw_fence_manager *fman)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	__vmw_fences_update(fman);
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+}
+
-bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
-			    uint32_t flags)
+bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
 {
-	struct vmw_fence_manager *fman = fence->fman;
-	unsigned long irq_flags;
-	uint32_t signaled;
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
 
-	spin_lock_irqsave(&fman->lock, irq_flags);
-	signaled = fence->signaled;
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
-
-	flags &= fence->signal_mask;
-	if ((signaled & flags) == flags)
+	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 		return 1;
 
-	if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
-		vmw_fences_update(fman);
-
-	spin_lock_irqsave(&fman->lock, irq_flags);
-	signaled = fence->signaled;
-	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	vmw_fences_update(fman);
 
-	return ((signaled & flags) == flags);
+	return fence_is_signaled(&fence->base);
 }
 
-int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
-		       uint32_t flags, bool lazy,
+int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
 		       bool interruptible, unsigned long timeout)
 {
-	struct vmw_private *dev_priv = fence->fman->dev_priv;
-	long ret;
-
-	if (likely(vmw_fence_obj_signaled(fence, flags)))
-		return 0;
-
-	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-	vmw_seqno_waiter_add(dev_priv);
-
-	if (interruptible)
-		ret = wait_event_interruptible_timeout
-			(fence->queue,
-			 vmw_fence_obj_signaled(fence, flags),
-			 timeout);
-	else
-		ret = wait_event_timeout
-			(fence->queue,
-			 vmw_fence_obj_signaled(fence, flags),
-			 timeout);
-
-	vmw_seqno_waiter_remove(dev_priv);
-
-	if (unlikely(ret == 0))
-		ret = -EBUSY;
-	else if (likely(ret > 0))
-		ret = 0;
+	long ret = fence_wait_timeout(&fence->base, interruptible, timeout);
 
-	return ret;
+	if (likely(ret > 0))
+		return 0;
+	else if (ret == 0)
+		return -EBUSY;
+	else
+		return ret;
 }
 
 void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
 {
-	struct vmw_private *dev_priv = fence->fman->dev_priv;
+	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
 
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 }
 
 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 {
-	struct vmw_fence_manager *fman = fence->fman;
-
-	kfree(fence);
-	/*
-	 * Free kernel space accounting.
-	 */
-	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
-			    fman->fence_size);
+	fence_free(&fence->base);
 }
 
 int vmw_fence_create(struct vmw_fence_manager *fman,
 		     uint32_t seqno,
-		     uint32_t mask,
 		     struct vmw_fence_obj **p_fence)
 {
-	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
 	struct vmw_fence_obj *fence;
 	int ret;
 
-	ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
-				   false, false);
-	if (unlikely(ret != 0))
-		return ret;
-
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (unlikely(fence == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_object;
-	}
-
-	ret = vmw_fence_obj_init(fman, fence, seqno, mask,
-				 vmw_fence_destroy);
+	if (unlikely(fence == NULL))
+		return -ENOMEM;
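Note: vmw_fence_obj_wait() now just wraps fence_wait_timeout(), which returns the remaining timeout (> 0) on success, 0 on timeout, or a negative errno; the wrapper folds that back into the driver's existing 0 / -EBUSY / -errno contract. A standalone restatement of the mapping:

    #include <errno.h>

    /* Same return-code mapping as the new vmw_fence_obj_wait(). */
    static int map_wait_result(long ret)
    {
        if (ret > 0)
            return 0;           /* signaled before the timeout ran out */
        else if (ret == 0)
            return -EBUSY;      /* timed out */
        else
            return (int)ret;    /* e.g. -ERESTARTSYS */
    }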
@@ -557 +579 @@
 }
 
 int vmw_user_fence_create(struct drm_file *file_priv,
 			  struct vmw_fence_manager *fman,
 			  uint32_t seqno,
-			  uint32_t mask,
 			  struct vmw_fence_obj **p_fence,
 			  uint32_t *p_handle)
 {
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -584 +605 @@
 		ret = -ENOMEM;
 		goto out_no_object;
 	}
 
 	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
-				 mask, vmw_user_fence_destroy);
+				 vmw_user_fence_destroy);
 	if (unlikely(ret != 0)) {
 		kfree(ufence);
 		goto out_no_object;
@@ -627 +648 @@
  * vmw_fence_fifo_down - signal all unsignaled fence objects.
  */
 
 void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
 {
-	unsigned long irq_flags;
 	struct list_head action_list;
 	int ret;
 
 	/*
 	 * The list may be altered while we traverse it, so always
 	 * restart when we've released the fman->lock.
 	 */
 
-	spin_lock_irqsave(&fman->lock, irq_flags);
+	spin_lock_irq(&fman->lock);
 	fman->fifo_down = true;
 	while (!list_empty(&fman->fence_list)) {
 		struct vmw_fence_obj *fence =
 			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
 				   head);
-		kref_get(&fence->kref);
+		fence_get(&fence->base);
 		spin_unlock_irq(&fman->lock);
 
-		ret = vmw_fence_obj_wait(fence, fence->signal_mask,
-					 false, false,
+		ret = vmw_fence_obj_wait(fence, false, false,
 					 VMW_FENCE_WAIT_TIMEOUT);
 
 		if (unlikely(ret != 0)) {
 			list_del_init(&fence->head);
-			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+			fence_signal(&fence->base);
 			INIT_LIST_HEAD(&action_list);
 			list_splice_init(&fence->seq_passed_actions,
 					 &action_list);
 			vmw_fences_perform_actions(fman, &action_list);
-			wake_up_all(&fence->queue);
 		}
 
-		spin_lock_irq(&fman->lock);
-
 		BUG_ON(!list_empty(&fence->head));
-		kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+		fence_put(&fence->base);
+		spin_lock_irq(&fman->lock);
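Note: the teardown loop now pins each fence with fence_get()/fence_put() instead of kref_get()/kref_put(..., vmw_fence_obj_destroy_locked); the framework runs .release (vmw_fence_obj_destroy) on the final put, so the drop no longer has to happen under fman->lock. A toy, non-atomic illustration of that release-on-last-put pattern (not driver code):

    #include <stdio.h>

    struct obj {
        int refcount;                   /* the kernel uses atomic refcounts */
        void (*release)(struct obj *);
    };

    static void obj_get(struct obj *o) { o->refcount++; }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0)
            o->release(o);              /* like fence_ops.release */
    }

    static void obj_release(struct obj *o) { printf("released\n"); }

    int main(void)
    {
        struct obj o = { 1, obj_release };
        obj_get(&o);
        obj_put(&o);
        obj_put(&o);                    /* last put -> release */
        return 0;
    }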
@@ -714 +731 @@
 
 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 
 	timeout = jiffies;
 	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
-		ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
+		ret = ((vmw_fence_obj_signaled(fence)) ?
 		       0 : -EBUSY);
 		goto out;
 	}
 
 	timeout = (unsigned long)arg->kernel_cookie - timeout;
 
@@ -756 +773 @@
 		       (unsigned long)arg->handle);
 		return -EINVAL;
 	}
 
 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
-	fman = fence->fman;
+	fman = fman_from_fence(fence);
 
-	arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
-	spin_lock_irq(&fman->lock);
+	arg->signaled = vmw_fence_obj_signaled(fence);
 
-	arg->signaled_flags = fence->signaled;
+	arg->signaled_flags = arg->flags;
+	spin_lock_irq(&fman->lock);
845
	if (unlikely(event == NULL))
862
	if (unlikely(event == NULL))
846
		return;
863
		return;
Line 847... Line 864...
847
 
864
 
848
	file_priv = event->file_priv;
865
	file_priv = event->file_priv;
849
	spin_lock_irqsave(&dev->event_lock, irq_flags);
866
	spin_lock_irqsave(&dev->event_lock, irq_flags);
850
/*
867
 
851
	if (likely(eaction->tv_sec != NULL)) {
868
	if (likely(eaction->tv_sec != NULL)) {
Line 852... Line 869...
852
		struct timeval tv;
869
		struct timeval tv;
853
 
870
 
854
		do_gettimeofday(&tv);
871
//       do_gettimeofday(&tv);
855
		*eaction->tv_sec = tv.tv_sec;
872
		*eaction->tv_sec = tv.tv_sec;
856
		*eaction->tv_usec = tv.tv_usec;
873
		*eaction->tv_usec = tv.tv_usec;
857
	}
874
	}
858
*/
875
 
859
	list_del_init(&eaction->fpriv_head);
876
	list_del_init(&eaction->fpriv_head);
860
	list_add_tail(&eaction->event->link, &file_priv->event_list);
877
	list_add_tail(&eaction->event->link, &file_priv->event_list);
861
	eaction->event = NULL;
878
	eaction->event = NULL;
@@ -874 +891 @@
  */
 static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
 {
 	struct vmw_event_fence_action *eaction =
 		container_of(action, struct vmw_event_fence_action, action);
-	struct vmw_fence_manager *fman = eaction->fence->fman;
+	struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&fman->lock, irq_flags);
 	list_del(&eaction->fpriv_head);
@@ -898 +915 @@
  * returns.
  */
 static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 			      struct vmw_fence_action *action)
 {
-	struct vmw_fence_manager *fman = fence->fman;
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	unsigned long irq_flags;
 	bool run_update = false;
 
 	mutex_lock(&fman->goal_irq_mutex);
 	spin_lock_irqsave(&fman->lock, irq_flags);
 
 	fman->pending_actions[action->type]++;
-	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
+	if (fence_is_signaled_locked(&fence->base)) {
 		struct list_head action_list;
 
958
				 uint32_t *tv_sec,
975
				 uint32_t *tv_sec,
959
				 uint32_t *tv_usec,
976
				 uint32_t *tv_usec,
960
				 bool interruptible)
977
				 bool interruptible)
961
{
978
{
962
	struct vmw_event_fence_action *eaction;
979
	struct vmw_event_fence_action *eaction;
963
	struct vmw_fence_manager *fman = fence->fman;
980
	struct vmw_fence_manager *fman = fman_from_fence(fence);
964
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
981
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
965
	unsigned long irq_flags;
982
	unsigned long irq_flags;
Line 966... Line 983...
966
 
983
 
967
	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
984
	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
Line 998... Line 1015...
998
				  uint32_t flags,
1015
				  uint32_t flags,
999
				  uint64_t user_data,
1016
				  uint64_t user_data,
1000
				  bool interruptible)
1017
				  bool interruptible)
1001
{
1018
{
1002
	struct vmw_event_fence_pending *event;
1019
	struct vmw_event_fence_pending *event;
-
 
1020
	struct vmw_fence_manager *fman = fman_from_fence(fence);
1003
	struct drm_device *dev = fence->fman->dev_priv->dev;
1021
	struct drm_device *dev = fman->dev_priv->dev;
1004
	unsigned long irq_flags;
1022
	unsigned long irq_flags;
1005
	int ret;
1023
	int ret;
Line 1006... Line 1024...
1006
 
1024
 
@@ -1047 +1065 @@
 						   NULL,
 						   interruptible);
 	if (ret != 0)
 		goto out_no_queue;
+
+	return 0;
 
 out_no_queue:
 	event->base.destroy(&event->base);
 out_no_event:
 	spin_lock_irqsave(&dev->event_lock, irq_flags);
@@ -1123 +1143 @@
 		}
 	}
 
 	BUG_ON(fence == NULL);
 
-	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
-		ret = vmw_event_fence_action_create(file_priv, fence,
-						    arg->flags,
-						    arg->user_data,
-						    true);
-	else
-		ret = vmw_event_fence_action_create(file_priv, fence,
-						    arg->flags,
-						    arg->user_data,
-						    true);
-
+	ret = vmw_event_fence_action_create(file_priv, fence,
+					    arg->flags,
+					    arg->user_data,
+					    true);
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS)
 			DRM_ERROR("Failed to attach event to fence.\n");
1152
					  handle, TTM_REF_USAGE);
1165
					  handle, TTM_REF_USAGE);
1153
out_no_ref_obj:
1166
out_no_ref_obj:
1154
	vmw_fence_obj_unreference(&fence);
1167
	vmw_fence_obj_unreference(&fence);
1155
	return ret;
1168
	return ret;
1156
}
1169
}
1157
 
-
 
1158
#endif
1170
#endif