Subversion Repositories KolibriOS


Comparison of vmwgfx_resource.c, Rev 4111 (old) → Rev 4569 (new). Each hunk below is shown in unified form: lines prefixed with '-' exist only in Rev 4111, lines prefixed with '+' only in Rev 4569, and unprefixed lines are common context. Hunk headers give the starting line in each revision.
Line 30... Line 30...
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
+
+#define VMW_RES_EVICT_ERR_COUNT 10
 
 struct vmw_user_dma_buffer {
-	struct ttm_base_object base;
+	struct ttm_prime_object prime;
 	struct vmw_dma_buffer dma;
 };
 
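Note on the struct change above: a ttm_prime_object embeds a ttm_base_object, which is why later hunks switch from container_of(base, ..., base) to container_of(base, ..., prime.base). A minimal userspace sketch of that container_of() round trip — the struct bodies here are illustrative stand-ins, not the real TTM definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in types; the real ones live in the TTM headers. */
struct ttm_base_object { int object_type; };
struct ttm_prime_object { struct ttm_base_object base; };
struct vmw_user_dma_buffer { struct ttm_prime_object prime; int dma; };

int main(void)
{
	struct vmw_user_dma_buffer bo = { { { 7 } }, 42 };
	struct ttm_base_object *base = &bo.prime.base;

	/* Same recovery step vmw_user_dmabuf_release() performs after the change. */
	struct vmw_user_dma_buffer *user_bo =
		container_of(base, struct vmw_user_dma_buffer, prime.base);

	printf("round trip ok: %d\n", user_bo == &bo);
	return 0;
}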
Line 211... Line 213...
 	res->avail = false;
 	res->dev_priv = dev_priv;
 	res->func = func;
 	INIT_LIST_HEAD(&res->lru_head);
 	INIT_LIST_HEAD(&res->mob_head);
+	INIT_LIST_HEAD(&res->binding_head);
 	res->id = -1;
 	res->backup = NULL;
 	res->backup_offset = 0;
 	res->backup_dirty = false;
 	res->res_dirty = false;
Line 293... Line 296...
 
 	base = ttm_base_object_lookup(tfile, handle);
 	if (unlikely(base == NULL))
 		return -EINVAL;
 
-	if (unlikely(base->object_type != converter->object_type))
+	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 		goto out_bad_resource;
Line 348... Line 351...
 }
 
 /**
  * Buffer management.
  */
+
+/**
+ * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
+				  bool user)
+{
+	static size_t struct_size, user_struct_size;
+	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+	if (unlikely(struct_size == 0)) {
+		size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+		struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_dma_buffer));
+		user_struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+	}
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		page_array_size +=
+			ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+	return ((user) ? user_struct_size : struct_size) +
+		page_array_size;
+}
 
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 
 	kfree(vmw_bo);
 }
 
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+
+//   ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
 int vmw_dmabuf_init(struct vmw_private *dev_priv,
 		    struct vmw_dma_buffer *vmw_bo,
 		    size_t size, struct ttm_placement *placement,
 		    bool interruptible,
 		    void (*bo_free) (struct ttm_buffer_object *bo))
 {
 	struct ttm_bo_device *bdev = &dev_priv->bdev;
 	size_t acc_size;
 	int ret;
+	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 
-	BUG_ON(!bo_free);
+	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 
-	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
 	INIT_LIST_HEAD(&vmw_bo->res_list);
 
 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-			  ttm_bo_type_device, placement,
+			  (user) ? ttm_bo_type_device :
+			  ttm_bo_type_kernel, placement,
 			  0, interruptible,
 			  NULL, acc_size, NULL, bo_free);
 	return ret;
 }
 
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
-	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
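The new vmw_dmabuf_acc_size() above charges, per buffer, the rounded-up object struct plus one void * per page — and one dma_addr_t per page when coherent DMA mapping is in use. A standalone sketch of just that arithmetic; round_pot() here is a simplified stand-in for TTM's ttm_round_pot(), and the 4 KiB page size is an assumption:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

static size_t round_pot(size_t size)	/* stand-in for ttm_round_pot() */
{
	size_t pot = 1;
	while (pot < size)
		pot <<= 1;
	return pot;
}

int main(void)
{
	size_t size = 1024 * 1024;	/* a 1 MiB buffer request */
	/* equivalent of PAGE_ALIGN(size) >> PAGE_SHIFT */
	size_t num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	size_t page_array_size = round_pot(num_pages * sizeof(void *));

	/* vmw_dma_alloc_coherent additionally stores a dma_addr_t per page */
	page_array_size += round_pot(num_pages * sizeof(uint64_t));

	printf("%zu pages -> %zu bytes of per-page bookkeeping\n",
	       num_pages, page_array_size);
	return 0;
}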
Line 397... Line 434...
 	*p_base = NULL;
 
 	if (unlikely(base == NULL))
 		return;
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+				   prime.base);
 	bo = &vmw_user_bo->dma.base;
 	ttm_bo_unref(&bo);
 }
 
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+					    enum ttm_ref_type ref_type)
+{
+	struct vmw_user_dma_buffer *user_bo;
+	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+		break;
+	default:
+		BUG();
+	}
+}
 
 /**
  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
Line 432... Line 485...
 		DRM_ERROR("Failed to allocate a buffer.\n");
 		return -ENOMEM;
 	}
 
 	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+			      (dev_priv->has_mob) ?
+			      &vmw_sys_placement :
 			      &vmw_vram_sys_placement, true,
 			      &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))
 		return ret;
 
 	tmp = ttm_bo_reference(&user_bo->dma.base);
-	ret = ttm_base_object_init(tfile,
-				   &user_bo->base,
-				   shareable,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
-	if (unlikely(ret != 0)) {
-		ttm_bo_unref(&tmp);
-		goto out_no_base_object;
-	}
+/*
+    ret = ttm_prime_object_init(tfile,
+				    size,
+				    &user_bo->prime,
+				   shareable,
+				   ttm_buffer_type,
+				    &vmw_user_dmabuf_release,
+				    &vmw_user_dmabuf_ref_obj_release);
+	if (unlikely(ret != 0)) {
+		ttm_bo_unref(&tmp);
+		goto out_no_base_object;
+	}
+*/
 
 	*p_dma_buf = &user_bo->dma;
-	*handle = user_bo->base.hash.key;
+	*handle = user_bo->prime.base.hash.key;
Line 471... Line 530...
 
 	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
 		return -EPERM;
 
 	vmw_user_bo = vmw_user_dma_buffer(bo);
-	return (vmw_user_bo->base.tfile == tfile ||
-		vmw_user_bo->base.shareable) ? 0 : -EPERM;
+	return (vmw_user_bo->prime.base.tfile == tfile ||
+		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+					struct ttm_object_file *tfile,
+					uint32_t flags)
+{
+	struct ttm_buffer_object *bo = &user_bo->dma.base;
+	bool existed;
+	int ret = 0;
+
+	if (flags & drm_vmw_synccpu_allow_cs) {
+		struct ttm_bo_device *bdev = bo->bdev;
+
+//       spin_lock(&bdev->fence_lock);
+//       ret = ttm_bo_wait(bo, false, true,
+//                 !!(flags & drm_vmw_synccpu_dontblock));
+//       spin_unlock(&bdev->fence_lock);
+		return ret;
+	}
+
+//   ret = ttm_bo_synccpu_write_grab
+//       (bo, !!(flags & drm_vmw_synccpu_dontblock));
+//   if (unlikely(ret != 0))
+//       return ret;
+
+	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+				 TTM_REF_SYNCCPU_WRITE, &existed);
+//   if (ret != 0 || existed)
+//       ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+	return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+					   struct ttm_object_file *tfile,
+					   uint32_t flags)
+{
+	if (!(flags & drm_vmw_synccpu_allow_cs))
+		return ttm_ref_object_base_unref(tfile, handle,
+						 TTM_REF_SYNCCPU_WRITE);
+
+	return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct drm_vmw_synccpu_arg *arg =
+		(struct drm_vmw_synccpu_arg *) data;
+	struct vmw_dma_buffer *dma_buf;
+	struct vmw_user_dma_buffer *user_bo;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+
+	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+			       drm_vmw_synccpu_dontblock |
+			       drm_vmw_synccpu_allow_cs)) != 0) {
+		DRM_ERROR("Illegal synccpu flags.\n");
+		return -EINVAL;
+	}
+
+	switch (arg->op) {
+	case drm_vmw_synccpu_grab:
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		if (unlikely(ret != 0))
+			return ret;
+
+		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+				       dma);
+		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+		vmw_dmabuf_unreference(&dma_buf);
+		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+			     ret != -EBUSY)) {
+			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	case drm_vmw_synccpu_release:
+		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+						      arg->flags);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid synccpu operation.\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
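The flag check at the top of the new ioctl rejects a request that asks for no access at all, or that sets any bit outside the four known flags. The same predicate in isolation, with illustrative flag values rather than the real drm_vmw_synccpu_* constants from the vmwgfx uapi:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values; the real constants come from the vmwgfx uapi header. */
enum {
	synccpu_read      = 1 << 0,
	synccpu_write     = 1 << 1,
	synccpu_dontblock = 1 << 2,
	synccpu_allow_cs  = 1 << 3,
};

static bool synccpu_flags_valid(unsigned int flags)
{
	if ((flags & (synccpu_read | synccpu_write)) == 0)
		return false;	/* no access requested */
	if ((flags & ~(synccpu_read | synccpu_write |
		       synccpu_dontblock | synccpu_allow_cs)) != 0)
		return false;	/* unknown bits set */
	return true;
}

int main(void)
{
	printf("read only: %d\n", synccpu_flags_valid(synccpu_read));
	printf("no access: %d\n", synccpu_flags_valid(synccpu_dontblock));
	return 0;
}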
Line 536... Line 719...
 		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 		       (unsigned long)handle);
 		return -ESRCH;
 	}
 
-	if (unlikely(base->object_type != ttm_buffer_type)) {
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 		ttm_base_object_unref(&base);
 		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 		       (unsigned long)handle);
 		return -EINVAL;
 	}
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 	ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;
 
 	return 0;
 }
 
 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-			      struct vmw_dma_buffer *dma_buf)
+			      struct vmw_dma_buffer *dma_buf,
+			      uint32_t *handle)
 {
 	struct vmw_user_dma_buffer *user_bo;
 
 	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
 		return -EINVAL;
 
+	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+	*handle = user_bo->prime.base.hash.key;
Line 783... Line 971...
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
-	struct vmw_user_dma_buffer *vmw_user_bo;
-	struct ttm_buffer_object *tmp;
+	struct vmw_dma_buffer *dma_buf;
 	int ret;
 
 	args->pitch = args->width * ((args->bpp + 7) / 8);
 	args->size = args->pitch * args->height;
 
-	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
-	if (vmw_user_bo == NULL)
-		return -ENOMEM;
-
 	ret = ttm_read_lock(&vmaster->lock, true);
-	if (ret != 0) {
-		kfree(vmw_user_bo);
+	if (unlikely(ret != 0))
 		return ret;
-	}
 
-	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
-			      &vmw_vram_sys_placement, true,
-			      &vmw_user_dmabuf_destroy);
-	if (ret != 0)
+	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				    args->size, false, &args->handle,
+				    &dma_buf);
+	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
 
-	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-				   &vmw_user_bo->base,
-				   false,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
-	if (unlikely(ret != 0))
-		goto out_no_base_object;
-
-	args->handle = vmw_user_bo->base.hash.key;
-
-out_no_base_object:
-	ttm_bo_unref(&tmp);
+	vmw_dmabuf_unreference(&dma_buf);
 out_no_dmabuf:
 	ttm_read_unlock(&vmaster->lock);
 	return ret;
 }
 #endif
 
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
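The pitch/size computation kept by the dumb-buffer rewrite above is the usual layout rule: a row is width times bytes-per-pixel (bpp rounded up to whole bytes), and the buffer is pitch times height. Worked through for one assumed mode:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* An assumed 1024x768, 32 bpp dumb buffer. */
	uint32_t width = 1024, height = 768, bpp = 32;

	uint32_t pitch = width * ((bpp + 7) / 8);  /* 1024 * 4 = 4096 bytes per row */
	uint64_t size  = (uint64_t)pitch * height; /* 4096 * 768 = 3145728 bytes (3 MiB) */

	printf("pitch=%u size=%llu\n", (unsigned)pitch, (unsigned long long)size);
	return 0;
}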
Line 842... Line 1021...
 	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
 	vmw_dmabuf_unreference(&out_buf);
 	return 0;
 }
 
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
 int vmw_dumb_destroy(struct drm_file *file_priv,
 		     struct drm_device *dev,
 		     uint32_t handle)
 {
Line 973... Line 1161...
 		list_add_tail(&res->mob_head, &new_backup->res_list);
 	}
 	if (new_backup)
 		res->backup_offset = new_backup_offset;
 
-	if (!res->func->may_evict)
+	if (!res->func->may_evict || res->id == -1)
 		return;
 
 	write_lock(&dev_priv->resource_lock);
Line 995... Line 1183...
  * @val_buf:        On successful return contains data about the
  *                  reserved and validated backup buffer.
  */
 static int
 vmw_resource_check_buffer(struct vmw_resource *res,
-			  struct ww_acquire_ctx *ticket,
 			  bool interruptible,
 			  struct ttm_validate_buffer *val_buf)
 {
 	struct list_head val_list;
 	bool backup_dirty = false;
 	}
 
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(ticket, &val_list);
+	ret = ttm_eu_reserve_buffers(NULL, &val_list);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
Line 1030... Line 1217...
 		goto out_no_validate;
 
 	return 0;
 
 out_no_validate:
-	ttm_eu_backoff_reservation(ticket, &val_list);
+	ttm_eu_backoff_reservation(NULL, &val_list);
 out_no_reserve:
 	ttm_bo_unref(&val_buf->bo);
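vmw_resource_check_buffer() drives the TTM execbuf-util helpers with a one-element list: the backup buffer is wrapped in a ttm_validate_buffer, put on a local list head, and the whole list is reserved or backed off as a unit. A freestanding sketch of that list idiom, using a minimal circular list as a stand-in for the kernel's list.h:

#include <stdio.h>

/* Minimal circular doubly-linked list, standing in for <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

struct ttm_validate_buffer { struct list_head head; int bo; };

int main(void)
{
	struct list_head val_list;
	struct ttm_validate_buffer val_buf = { .bo = 1 };

	/* Same shape as vmw_resource_check_buffer(): one buffer, one list. */
	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf.head, &val_list);

	printf("list holds exactly one entry: %d\n",
	       val_list.next == &val_buf.head && val_list.prev == &val_buf.head);
	return 0;
}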
Line 1075... Line 1262...
  *                                    backup buffer
  *.
  * @val_buf:        Backup buffer information.
  */
 static void
-vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
-				 struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
 {
 	struct list_head val_list;
 
 	if (likely(val_buf->bo == NULL))
 		return;
 
 	INIT_LIST_HEAD(&val_list);
 	list_add_tail(&val_buf->head, &val_list);
-	ttm_eu_backoff_reservation(ticket, &val_list);
+	ttm_eu_backoff_reservation(NULL, &val_list);
 	ttm_bo_unref(&val_buf->bo);
 }
 
 /**
  * vmw_resource_do_evict - Evict a resource, and transfer its data
  *                         to a backup buffer.
  *
  * @res:            The resource to evict.
+ * @interruptible:  Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res)
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
-	struct ww_acquire_ctx ticket;
 	int ret;
 
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
Line 1120... Line 1306...
 	}
 	ret = func->destroy(res);
 	res->backup_dirty = true;
 	res->res_dirty = false;
 out_no_unbind:
-	vmw_resource_backoff_reservation(&ticket, &val_buf);
+	vmw_resource_backoff_reservation(&val_buf);
 
 	return ret;
Line 1144... Line 1330...
 	int ret;
 	struct vmw_resource *evict_res;
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 	struct ttm_validate_buffer val_buf;
+	unsigned err_count = 0;
 
 	if (likely(!res->func->may_evict))
 		return 0;
Line 1158... Line 1345...
 		if (likely(ret != -EBUSY))
 			break;
 
 		write_lock(&dev_priv->resource_lock);
 		if (list_empty(lru_list) || !res->func->may_evict) {
-			DRM_ERROR("Out of device device id entries "
+			DRM_ERROR("Out of device device resources "
 				  "for %s.\n", res->func->type_name);
 			ret = -EBUSY;
 			write_unlock(&dev_priv->resource_lock);
 			break;
Line 1171... Line 1358...
 			(list_first_entry(lru_list, struct vmw_resource,
 					  lru_head));
 		list_del_init(&evict_res->lru_head);
 
 		write_unlock(&dev_priv->resource_lock);
 
-		vmw_resource_do_evict(evict_res);
+		ret = vmw_resource_do_evict(evict_res, true);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (ret == -ERESTARTSYS ||
+			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				goto out_no_validate;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
 
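The loop above now tolerates transient eviction failures: a failed victim goes back on the LRU, and the caller only gives up after VMW_RES_EVICT_ERR_COUNT consecutive errors, or immediately when interrupted by a signal (-ERESTARTSYS). The control flow in miniature — single-threaded, with a stubbed evict step in place of vmw_resource_do_evict():

#include <stdio.h>

#define VMW_RES_EVICT_ERR_COUNT 10
#define ERESTARTSYS 512	/* Linux value; shown for the comparison below */

/* Stub: pretend the first two eviction attempts fail transiently (-EBUSY). */
static int do_evict(int attempt)
{
	return (attempt < 2) ? -16 : 0;
}

int main(void)
{
	unsigned err_count = 0;
	int attempt = 0, ret;

	do {
		ret = do_evict(attempt++);
		if (ret != 0) {
			/* the kernel code re-queues the resource on the LRU here */
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				printf("giving up after %u errors\n", err_count);
				return ret;
			}
			continue;
		}
		break;	/* evicted one resource; the real loop then retries */
	} while (1);

	printf("evicted after %d attempts\n", attempt);
	return 0;
}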
Line 1232... Line 1431...
  *
  * @bo:             The TTM buffer object about to move.
  * @mem:            The struct ttm_mem_reg indicating to what memory
  *                  region the move is taking place.
  *
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved when the
+ * buffer is no longer bound to the resource, so @bo:res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
  */
 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 			      struct ttm_mem_reg *mem)
 {
 }
Line 1256... Line 1465...
  *
  * @dev_priv:       Pointer to a device private struct
  * @type:           The resource type to evict
  *
  * To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
  */
 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 				    enum vmw_res_type type)
 {
 	struct list_head *lru_list = &dev_priv->res_lru[type];
 	struct vmw_resource *evict_res;
+	unsigned err_count = 0;
+	int ret;
 
 	do {
 		write_lock(&dev_priv->resource_lock);
Line 1275... Line 1486...
 		evict_res = vmw_resource_reference(
 			list_first_entry(lru_list, struct vmw_resource,
 					 lru_head));
 		list_del_init(&evict_res->lru_head);
 		write_unlock(&dev_priv->resource_lock);
 
-		vmw_resource_do_evict(evict_res);
+		ret = vmw_resource_do_evict(evict_res, false);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				return;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
 
 out_unlock: