Rev 5078 | Rev 6296 ||
Line 1... | Line 1... | ||
1 | /************************************************************************** |
1 | /************************************************************************** |
2 | * |
2 | * |
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA |
3 | * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. |
4 | * All Rights Reserved. |
5 | * |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the |
7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including |
8 | * "Software"), to deal in the Software without restriction, including |
Line 29... | Line 29... | ||
29 | #include |
29 | #include |
30 | #include |
30 | #include |
31 | #include |
31 | #include |
32 | #include |
32 | #include |
33 | #include "vmwgfx_resource_priv.h" |
33 | #include "vmwgfx_resource_priv.h" |
- | 34 | #include "vmwgfx_binding.h" |
Line 34... | Line 35... | ||
34 | 35 | ||
Line 35... | Line 36... | ||
35 | #define VMW_RES_EVICT_ERR_COUNT 10 |
36 | #define VMW_RES_EVICT_ERR_COUNT 10 |
36 | 37 | ||
Line 86... | Line 87... | ||
86 | { |
87 | { |
87 | kref_get(&res->kref); |
88 | kref_get(&res->kref); |
88 | return res; |
89 | return res; |
89 | } |
90 | } |
Line -... | Line 91... ||
- | 91 ||
- | 92 | struct vmw_resource * |
- | 93 | vmw_resource_reference_unless_doomed(struct vmw_resource *res) |
- | 94 | { |
- | 95 | return kref_get_unless_zero(&res->kref) ? res : NULL; |
Line 90... | Line 96... ||
90 | 96 | } |
91 | 97 ||
92 | /** |
98 | /** |
93 | * vmw_resource_release_id - release a resource id to the id manager. |
99 | * vmw_resource_release_id - release a resource id to the id manager. |
Line 114... | Line 120... | ||
114 | container_of(kref, struct vmw_resource, kref); |
120 | container_of(kref, struct vmw_resource, kref); |
115 | struct vmw_private *dev_priv = res->dev_priv; |
121 | struct vmw_private *dev_priv = res->dev_priv; |
116 | int id; |
122 | int id; |
117 | struct idr *idr = &dev_priv->res_idr[res->func->res_type]; |
123 | struct idr *idr = &dev_priv->res_idr[res->func->res_type]; |
Line -... | Line 124... | ||
- | 124 | ||
118 | 125 | write_lock(&dev_priv->resource_lock); |
|
119 | res->avail = false; |
126 | res->avail = false; |
120 | list_del_init(&res->lru_head); |
127 | list_del_init(&res->lru_head); |
121 | write_unlock(&dev_priv->resource_lock); |
128 | write_unlock(&dev_priv->resource_lock); |
122 | if (res->backup) { |
129 | if (res->backup) { |
Line 126... | Line 133... | ||
126 | if (!list_empty(&res->mob_head) && |
133 | if (!list_empty(&res->mob_head) && |
127 | res->func->unbind != NULL) { |
134 | res->func->unbind != NULL) { |
128 | struct ttm_validate_buffer val_buf; |
135 | struct ttm_validate_buffer val_buf; |
Line 129... | Line 136... | ||
129 | 136 ||
130 | val_buf.bo = bo; |
137 | val_buf.bo = bo; |
- | 138 | val_buf.shared = false; |
131 | res->func->unbind(res, false, &val_buf); |
139 | res->func->unbind(res, false, &val_buf); |
132 | } |
140 | } |
133 | res->backup_dirty = false; |
141 | res->backup_dirty = false; |
134 | list_del_init(&res->mob_head); |
142 | list_del_init(&res->mob_head); |
135 | ttm_bo_unreserve(bo); |
143 | ttm_bo_unreserve(bo); |
136 | vmw_dmabuf_unreference(&res->backup); |
144 | vmw_dmabuf_unreference(&res->backup); |
Line 137... | Line 145... | ||
137 | } |
145 | } |
138 | 146 ||
139 | if (likely(res->hw_destroy != NULL)) { |
147 | if (likely(res->hw_destroy != NULL)) { |
140 | res->hw_destroy(res); |
148 | mutex_lock(&dev_priv->binding_mutex); |
141 | mutex_lock(&dev_priv->binding_mutex); |
149 | vmw_binding_res_list_kill(&res->binding_head); |
142 | vmw_context_binding_res_list_kill(&res->binding_head); |
150 | mutex_unlock(&dev_priv->binding_mutex); |
Line 143... | Line 151... | ||
143 | mutex_unlock(&dev_priv->binding_mutex); |
151 | res->hw_destroy(res); |
144 | } |
152 | } |
145 | 153 | ||
146 | id = res->id; |
154 | id = res->id; |
147 | if (res->res_free != NULL) |
155 | if (res->res_free != NULL) |
Line 148... | Line 156... | ||
148 | res->res_free(res); |
156 | res->res_free(res); |
149 | else |
157 | else |
150 | kfree(res); |
158 | kfree(res); |
151 | 159 ||
152 | write_lock(&dev_priv->resource_lock); |
160 | write_lock(&dev_priv->resource_lock); |
Line 153... | Line 161... ||
153 | - ||
154 | if (id != -1) |
161 | if (id != -1) |
155 | idr_remove(idr, id); |
162 | idr_remove(idr, id); |
- | 163 | write_unlock(&dev_priv->resource_lock); |
156 | } |
164 | } |
Line 157... | Line 164... ||
157 | 165 ||
158 | void vmw_resource_unreference(struct vmw_resource **p_res) |
166 | void vmw_resource_unreference(struct vmw_resource **p_res) |
159 | { |
167 | { |
160 | struct vmw_resource *res = *p_res; |
168 | struct vmw_resource *res = *p_res; |
161 | struct vmw_private *dev_priv = res->dev_priv; |
- |
Line 162... | Line 167... ||
162 ||
163 | *p_res = NULL; |
Line 252... | Line 257... | ||
252 | res->avail = true; |
257 | res->avail = true; |
253 | res->hw_destroy = hw_destroy; |
258 | res->hw_destroy = hw_destroy; |
254 | write_unlock(&dev_priv->resource_lock); |
259 | write_unlock(&dev_priv->resource_lock); |
255 | } |
260 | } |
Line 256... | Line 261... | ||
256 | 261 | ||
257 | struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, |
262 | static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, |
258 | struct idr *idr, int id) |
263 | struct idr *idr, int id) |
259 | { |
264 | { |
Line 260... | Line 265... | ||
260 | struct vmw_resource *res; |
265 | struct vmw_resource *res; |
261 | 266 | ||
262 | read_lock(&dev_priv->resource_lock); |
267 | read_lock(&dev_priv->resource_lock); |
263 | res = idr_find(idr, id); |
268 | res = idr_find(idr, id); |
264 | if (res && res->avail) |
269 | if (!res || !res->avail || !kref_get_unless_zero(&res->kref)) |
265 | kref_get(&res->kref); |
270 | res = NULL; |
266 | else |
- |
Line 267... | Line 271... ||
267 | res = NULL; |
271 ||
268 | read_unlock(&dev_priv->resource_lock); |
272 | read_unlock(&dev_priv->resource_lock); |
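The lookup above now refuses to hand out a reference once the refcount has already dropped to zero: together with the vmw_resource_reference_unless_doomed() helper added earlier, kref_get_unless_zero() closes the race where idr_find() returns a resource that vmw_resource_release() is in the middle of tearing down. A minimal sketch of the same pattern outside this driver, assuming an illustrative object type and table (my_obj, my_table and my_obj_release are not names from this file):

    /* Illustrative only: take a reference during lookup unless the object is doomed. */
    static struct my_obj *my_obj_lookup(struct my_table *t, int id)
    {
            struct my_obj *obj;

            read_lock(&t->lock);
            obj = idr_find(&t->idr, id);                /* no reference taken yet */
            if (obj && !kref_get_unless_zero(&obj->kref))
                    obj = NULL;                         /* refcount already hit zero */
            read_unlock(&t->lock);

            return obj;                                 /* caller must kref_put(..., my_obj_release) */
    }

The point of the change is that a zero refcount is treated the same as "not found", so release never races with a late kref_get().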
Line 348... | Line 352... | ||
348 | *out_surf = vmw_res_to_srf(res); |
352 | *out_surf = vmw_res_to_srf(res); |
349 | return 0; |
353 | return 0; |
350 | } |
354 | } |
Line 351... | Line 355... | ||
351 | 355 | ||
352 | *out_surf = NULL; |
356 | *out_surf = NULL; |
353 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf); |
357 | ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL); |
354 | return ret; |
358 | return ret; |
Line 355... | Line 359... | ||
355 | } |
359 | } |
356 | 360 | ||
Line 398... | Line 402... | ||
398 | 402 | ||
399 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) |
403 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) |
400 | { |
404 | { |
Line 401... | Line 405... | ||
401 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); |
405 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); |
402 | 406 | ||
Line 403... | Line 407... | ||
403 | // ttm_prime_object_kfree(vmw_user_bo, prime); |
407 | ttm_prime_object_kfree(vmw_user_bo, prime); |
404 | } |
408 | } |
405 | 409 | ||
Line 422... | Line 426... | ||
422 | INIT_LIST_HEAD(&vmw_bo->res_list); |
426 | INIT_LIST_HEAD(&vmw_bo->res_list); |
Line 423... | Line 427... | ||
423 | 427 | ||
424 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
428 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
425 | ttm_bo_type_device, placement, |
429 | ttm_bo_type_device, placement, |
426 | 0, interruptible, |
430 | 0, interruptible, |
427 | NULL, acc_size, NULL, bo_free); |
431 | NULL, acc_size, NULL, NULL, bo_free); |
428 | return ret; |
432 | return ret; |
Line 429... | Line 433... | ||
429 | } |
433 | } |
430 | 434 | ||
Line 475... | Line 479... | ||
475 | int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, |
479 | int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv, |
476 | struct ttm_object_file *tfile, |
480 | struct ttm_object_file *tfile, |
477 | uint32_t size, |
481 | uint32_t size, |
478 | bool shareable, |
482 | bool shareable, |
479 | uint32_t *handle, |
483 | uint32_t *handle, |
480 | struct vmw_dma_buffer **p_dma_buf) |
484 | struct vmw_dma_buffer **p_dma_buf, |
- | 485 | struct ttm_base_object **p_base) |
481 | { |
486 | { |
482 | struct vmw_user_dma_buffer *user_bo; |
487 | struct vmw_user_dma_buffer *user_bo; |
483 | struct ttm_buffer_object *tmp; |
488 | struct ttm_buffer_object *tmp; |
484 | int ret; |
489 | int ret; |
Line 496... | Line 501... | ||
496 | &vmw_user_dmabuf_destroy); |
501 | &vmw_user_dmabuf_destroy); |
497 | if (unlikely(ret != 0)) |
502 | if (unlikely(ret != 0)) |
498 | return ret; |
503 | return ret; |
Line 499... | Line 504... | ||
499 | 504 | ||
500 | tmp = ttm_bo_reference(&user_bo->dma.base); |
505 | tmp = ttm_bo_reference(&user_bo->dma.base); |
501 | /* |
- |
502 | ret = ttm_prime_object_init(tfile, |
506 | ret = ttm_prime_object_init(tfile, |
503 | size, |
507 | size, |
504 | &user_bo->prime, |
508 | &user_bo->prime, |
505 | shareable, |
509 | shareable, |
Line 508... | Line 512... | ||
508 | &vmw_user_dmabuf_ref_obj_release); |
512 | &vmw_user_dmabuf_ref_obj_release); |
509 | if (unlikely(ret != 0)) { |
513 | if (unlikely(ret != 0)) { |
510 | ttm_bo_unref(&tmp); |
514 | ttm_bo_unref(&tmp); |
511 | goto out_no_base_object; |
515 | goto out_no_base_object; |
512 | } |
516 | } |
513 | */ |
- | |
Line 514... | Line 517... | ||
514 | 517 | ||
515 | *p_dma_buf = &user_bo->dma; |
518 | *p_dma_buf = &user_bo->dma; |
- | 519 | if (p_base) { |
- | 520 | *p_base = &user_bo->prime.base; |
- | 521 | kref_get(&(*p_base)->refcount); |
- | 522 | } |
Line 516... | Line 523... | ||
516 | *handle = user_bo->prime.base.hash.key; |
523 | *handle = user_bo->prime.base.hash.key; |
517 | 524 | ||
518 | out_no_base_object: |
525 | out_no_base_object: |
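With the extra p_base argument, vmw_user_dmabuf_alloc() can now hand back the underlying ttm_base_object alongside the DMA buffer, taking an extra kref on it when the caller asks for it. Under that reading, a caller that passes a non-NULL p_base owns one additional base-object reference and must drop it; a hedged sketch of the expected call shape (error handling trimmed, local names illustrative):

    struct vmw_dma_buffer *dma_buf;
    struct ttm_base_object *base;
    uint32_t handle;
    int ret;

    ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
                                &handle, &dma_buf, &base);
    if (ret)
            return ret;

    /* ... use dma_buf / handle ... */

    vmw_dmabuf_unreference(&dma_buf);   /* drop the buffer reference */
    ttm_base_object_unref(&base);       /* drop the extra base-object reference */

Callers that do not need the base object simply pass NULL, as the ioctl and dumb-buffer paths later in this diff do.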
Line 562... | Line 569... | ||
562 | struct ttm_buffer_object *bo = &user_bo->dma.base; |
569 | struct ttm_buffer_object *bo = &user_bo->dma.base; |
563 | bool existed; |
570 | bool existed; |
564 | int ret; |
571 | int ret; |
Line 565... | Line 572... | ||
565 | 572 | ||
566 | if (flags & drm_vmw_synccpu_allow_cs) { |
573 | if (flags & drm_vmw_synccpu_allow_cs) { |
- | 574 | bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); |
- | 575 | long lret; |
- | 576 ||
- | 577 | if (nonblock) |
Line 567... | Line 578... ||
567 | struct ttm_bo_device *bdev = bo->bdev; |
578 | return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY; |
568 | 579 | ||
569 | // spin_lock(&bdev->fence_lock); |
580 | // spin_lock(&bdev->fence_lock); |
570 | // ret = ttm_bo_wait(bo, false, true, |
581 | // ret = ttm_bo_wait(bo, false, true, |
571 | // !!(flags & drm_vmw_synccpu_dontblock)); |
582 | // !!(flags & drm_vmw_synccpu_dontblock)); |
572 | // spin_unlock(&bdev->fence_lock); |
583 | // spin_unlock(&bdev->fence_lock); |
Line 573... | Line 584... | ||
573 | return ret; |
584 | return ret; |
574 | } |
585 | } |
575 | 586 | ||
576 | // ret = ttm_bo_synccpu_write_grab |
587 | ret = ttm_bo_synccpu_write_grab |
Line 577... | Line 588... | ||
577 | // (bo, !!(flags & drm_vmw_synccpu_dontblock)); |
588 | (bo, !!(flags & drm_vmw_synccpu_dontblock)); |
578 | // if (unlikely(ret != 0)) |
589 | if (unlikely(ret != 0)) |
579 | // return ret; |
590 | return ret; |
580 | 591 | ||
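For the drm_vmw_synccpu_allow_cs case the new code no longer goes through the old fence_lock path; in the non-blocking case it simply asks the buffer's reservation object whether all fences (shared and exclusive) have signaled and returns -EBUSY otherwise. A sketch of that check; the blocking branch shown here is an assumption about the part of the new function not visible in this hunk, modeled on the usual reservation-object wait:

    bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
    long lret;

    if (nonblock)
            /* true = consider shared fences as well as the exclusive one */
            return reservation_object_test_signaled_rcu(bo->resv, true) ?
                    0 : -EBUSY;

    /* Assumed blocking variant (not shown in this hunk). */
    lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
                                               MAX_SCHEDULE_TIMEOUT);
    if (!lret)
            return -EBUSY;
    else if (lret < 0)
            return lret;
    return 0;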
Line 622... | Line 633... | ||
622 | struct drm_vmw_synccpu_arg *arg = |
633 | struct drm_vmw_synccpu_arg *arg = |
623 | (struct drm_vmw_synccpu_arg *) data; |
634 | (struct drm_vmw_synccpu_arg *) data; |
624 | struct vmw_dma_buffer *dma_buf; |
635 | struct vmw_dma_buffer *dma_buf; |
625 | struct vmw_user_dma_buffer *user_bo; |
636 | struct vmw_user_dma_buffer *user_bo; |
626 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
637 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
- | 638 | struct ttm_base_object *buffer_base; |
627 | int ret; |
639 | int ret; |
Line 628... | Line 640... | ||
628 | 640 | ||
629 | if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 |
641 | if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 |
630 | || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | |
642 | || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write | |
Line 634... | Line 646... | ||
634 | return -EINVAL; |
646 | return -EINVAL; |
635 | } |
647 | } |
Line 636... | Line 648... | ||
636 | 648 | ||
637 | switch (arg->op) { |
649 | switch (arg->op) { |
638 | case drm_vmw_synccpu_grab: |
650 | case drm_vmw_synccpu_grab: |
639 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf); |
651 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf, |
- | 652 | &buffer_base); |
640 | if (unlikely(ret != 0)) |
653 | if (unlikely(ret != 0)) |
Line 641... | Line 654... | ||
641 | return ret; |
654 | return ret; |
642 | 655 | ||
643 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, |
656 | user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, |
644 | dma); |
657 | dma); |
645 | ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); |
658 | ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags); |
646 | vmw_dmabuf_unreference(&dma_buf); |
659 | vmw_dmabuf_unreference(&dma_buf); |
- | 660 | ttm_base_object_unref(&buffer_base); |
647 | if (unlikely(ret != 0 && ret != -ERESTARTSYS && |
661 | if (unlikely(ret != 0 && ret != -ERESTARTSYS && |
648 | ret != -EBUSY)) { |
662 | ret != -EBUSY)) { |
649 | DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", |
663 | DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", |
Line 684... | Line 698... | ||
684 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
698 | ret = ttm_read_lock(&dev_priv->reservation_sem, true); |
685 | if (unlikely(ret != 0)) |
699 | if (unlikely(ret != 0)) |
686 | return ret; |
700 | return ret; |
Line 687... | Line 701... | ||
687 | 701 | ||
688 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
702 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
- | 703 | req->size, false, &handle, &dma_buf, |
|
689 | req->size, false, &handle, &dma_buf); |
704 | NULL); |
690 | if (unlikely(ret != 0)) |
705 | if (unlikely(ret != 0)) |
Line 691... | Line 706... | ||
691 | goto out_no_dmabuf; |
706 | goto out_no_dmabuf; |
692 | 707 | ||
Line 714... | Line 729... | ||
714 | TTM_REF_USAGE); |
729 | TTM_REF_USAGE); |
715 | } |
730 | } |
716 | #endif |
731 | #endif |
Line 717... | Line 732... | ||
717 | 732 | ||
718 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
733 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, |
719 | uint32_t handle, struct vmw_dma_buffer **out) |
734 | uint32_t handle, struct vmw_dma_buffer **out, |
- | 735 | struct ttm_base_object **p_base) |
720 | { |
736 | { |
721 | struct vmw_user_dma_buffer *vmw_user_bo; |
737 | struct vmw_user_dma_buffer *vmw_user_bo; |
Line 722... | Line 738... | ||
722 | struct ttm_base_object *base; |
738 | struct ttm_base_object *base; |
Line 736... | Line 752... | ||
736 | } |
752 | } |
Line 737... | Line 753... | ||
737 | 753 | ||
738 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, |
754 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, |
739 | prime.base); |
755 | prime.base); |
740 | (void)ttm_bo_reference(&vmw_user_bo->dma.base); |
756 | (void)ttm_bo_reference(&vmw_user_bo->dma.base); |
- | 757 | if (p_base) |
- | 758 | *p_base = base; |
- | 759 | else |
741 | ttm_base_object_unref(&base); |
760 | ttm_base_object_unref(&base); |
Line 742... | Line 761... | ||
742 | *out = &vmw_user_bo->dma; |
761 | *out = &vmw_user_bo->dma; |
743 | 762 | ||
Line 892... | Line 911... | ||
892 | return ret; |
911 | return ret; |
Line 893... | Line 912... | ||
893 | 912 | ||
894 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), |
913 | ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), |
895 | vmw_user_stream_size, |
914 | vmw_user_stream_size, |
- | 915 | false, true); |
|
896 | false, true); |
915 | false, true); |
- | 916 | ttm_read_unlock(&dev_priv->reservation_sem); |
897 | if (unlikely(ret != 0)) { |
917 | if (unlikely(ret != 0)) { |
898 | if (ret != -ERESTARTSYS) |
918 | if (ret != -ERESTARTSYS) |
899 | DRM_ERROR("Out of graphics memory for stream" |
919 | DRM_ERROR("Out of graphics memory for stream" |
900 | " creation.\n"); |
920 | " creation.\n"); |
- | 921 ||
901 | goto out_unlock; |
922 | goto out_ret; |
Line 902... | Line 922... ||
902 | } |
923 | } |
903 ||
905 | stream = kmalloc(sizeof(*stream), GFP_KERNEL); |
925 | stream = kmalloc(sizeof(*stream), GFP_KERNEL); |
906 | if (unlikely(stream == NULL)) { |
926 | if (unlikely(stream == NULL)) { |
907 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
927 | ttm_mem_global_free(vmw_mem_glob(dev_priv), |
908 | vmw_user_stream_size); |
928 | vmw_user_stream_size); |
Line 909... | Line 929... | ||
909 | ret = -ENOMEM; |
929 | ret = -ENOMEM; |
910 | goto out_unlock; |
930 | goto out_ret; |
911 | } |
931 | } |
Line 918... | Line 938... | ||
918 | * From here on, the destructor takes over resource freeing. |
938 | * From here on, the destructor takes over resource freeing. |
919 | */ |
939 | */ |
Line 920... | Line 940... | ||
920 | 940 | ||
921 | ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); |
941 | ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); |
922 | if (unlikely(ret != 0)) |
942 | if (unlikely(ret != 0)) |
Line 923... | Line 943... | ||
923 | goto out_unlock; |
943 | goto out_ret; |
924 | 944 | ||
925 | tmp = vmw_resource_reference(res); |
945 | tmp = vmw_resource_reference(res); |
Line 932... | Line 952... | ||
932 | } |
952 | } |
Line 933... | Line 953... | ||
933 | 953 | ||
934 | arg->stream_id = res->id; |
954 | arg->stream_id = res->id; |
935 | out_err: |
955 | out_err: |
936 | vmw_resource_unreference(&res); |
956 | vmw_resource_unreference(&res); |
937 | out_unlock: |
957 | out_ret: |
938 | ttm_read_unlock(&dev_priv->reservation_sem); |
- |
939 | return ret; |
958 | return ret; |
940 | } |
959 | } |
Line 941... | Line 960... | ||
941 | #endif |
960 | #endif |
Line 970... | Line 989... | ||
970 | err_ref: |
989 | err_ref: |
971 | vmw_resource_unreference(&res); |
990 | vmw_resource_unreference(&res); |
972 | return ret; |
991 | return ret; |
973 | } |
992 | } |
Line -... | Line 993... ||
- | 993 ||
- | 994 ||
- | 995 | /** |
974 | 996 | * vmw_dumb_create - Create a dumb kms buffer |
- | 997 | * |
- | 998 | * @file_priv: Pointer to a struct drm_file identifying the caller. |
- | 999 | * @dev: Pointer to the drm device. |
- | 1000 | * @args: Pointer to a struct drm_mode_create_dumb structure |
- | 1001 | * |
- | 1002 | * This is a driver callback for the core drm create_dumb functionality. |
- | 1003 | * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except |
- | 1004 | * that the arguments have a different format. |
975 | #if 0 |
1005 | */ |
976 | int vmw_dumb_create(struct drm_file *file_priv, |
1006 | int vmw_dumb_create(struct drm_file *file_priv, |
977 | struct drm_device *dev, |
1007 | struct drm_device *dev, |
978 | struct drm_mode_create_dumb *args) |
1008 | struct drm_mode_create_dumb *args) |
979 | { |
1009 | { |
980 | struct vmw_private *dev_priv = vmw_priv(dev); |
1010 | struct vmw_private *dev_priv = vmw_priv(dev); |
981 | struct vmw_master *vmaster = vmw_master(file_priv->master); |
- |
982 | struct vmw_dma_buffer *dma_buf; |
1011 | struct vmw_dma_buffer *dma_buf; |
Line 983... | Line 1012... | ||
983 | int ret; |
1012 | int ret; |
984 | 1013 | ||
Line 989... | Line 1018... | ||
989 | if (unlikely(ret != 0)) |
1018 | if (unlikely(ret != 0)) |
990 | return ret; |
1019 | return ret; |
Line 991... | Line 1020... | ||
991 | 1020 | ||
992 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
1021 | ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, |
993 | args->size, false, &args->handle, |
1022 | args->size, false, &args->handle, |
994 | &dma_buf); |
1023 | &dma_buf, NULL); |
995 | if (unlikely(ret != 0)) |
1024 | if (unlikely(ret != 0)) |
Line 996... | Line 1025... | ||
996 | goto out_no_dmabuf; |
1025 | goto out_no_dmabuf; |
997 | 1026 | ||
998 | vmw_dmabuf_unreference(&dma_buf); |
1027 | vmw_dmabuf_unreference(&dma_buf); |
999 | out_no_dmabuf: |
1028 | out_no_dmabuf: |
1000 | ttm_read_unlock(&dev_priv->reservation_sem); |
1029 | ttm_read_unlock(&dev_priv->reservation_sem); |
1001 | return ret; |
1030 | return ret; |
Line 1002... | Line 1030... ||
1002 | } |
1031 | } |
1003 | #endif |
- |
1004 | 1032 | ||
1005 | /** |
1033 | /** |
Line 1018... | Line 1046... | ||
1018 | { |
1046 | { |
1019 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
1047 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
1020 | struct vmw_dma_buffer *out_buf; |
1048 | struct vmw_dma_buffer *out_buf; |
1021 | int ret; |
1049 | int ret; |
Line 1022... | Line 1050... | ||
1022 | 1050 | ||
1023 | ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); |
1051 | ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL); |
1024 | if (ret != 0) |
1052 | if (ret != 0) |
Line 1025... | Line 1053... | ||
1025 | return -EINVAL; |
1053 | return -EINVAL; |
1026 | 1054 | ||
Line 1136... | Line 1164... | ||
1136 | /** |
1164 | /** |
1137 | * vmw_resource_unreserve - Unreserve a resource previously reserved for |
1165 | * vmw_resource_unreserve - Unreserve a resource previously reserved for |
1138 | * command submission. |
1166 | * command submission. |
1139 | * |
1167 | * |
1140 | * @res: Pointer to the struct vmw_resource to unreserve. |
1168 | * @res: Pointer to the struct vmw_resource to unreserve. |
- | 1169 | * @switch_backup: Backup buffer has been switched. |
1141 | * @new_backup: Pointer to new backup buffer if command submission |
1170 | * @new_backup: Pointer to new backup buffer if command submission |
1142 | * switched. |
1171 | * switched. May be NULL. |
1143 | * @new_backup_offset: New backup offset if @new_backup is !NULL. |
1172 | * @new_backup_offset: New backup offset if @switch_backup is true. |
1144 | * |
1173 | * |
1145 | * Currently unreserving a resource means putting it back on the device's |
1174 | * Currently unreserving a resource means putting it back on the device's |
1146 | * resource lru list, so that it can be evicted if necessary. |
1175 | * resource lru list, so that it can be evicted if necessary. |
1147 | */ |
1176 | */ |
1148 | void vmw_resource_unreserve(struct vmw_resource *res, |
1177 | void vmw_resource_unreserve(struct vmw_resource *res, |
- | 1178 | bool switch_backup, |
1149 | struct vmw_dma_buffer *new_backup, |
1179 | struct vmw_dma_buffer *new_backup, |
1150 | unsigned long new_backup_offset) |
1180 | unsigned long new_backup_offset) |
1151 | { |
1181 | { |
1152 | struct vmw_private *dev_priv = res->dev_priv; |
1182 | struct vmw_private *dev_priv = res->dev_priv; |
Line 1153... | Line 1183... | ||
1153 | 1183 | ||
1154 | if (!list_empty(&res->lru_head)) |
1184 | if (!list_empty(&res->lru_head)) |
Line 1155... | Line 1185... | ||
1155 | return; |
1185 | return; |
1156 | 1186 ||
1157 | if (new_backup && new_backup != res->backup) { |
1187 | if (switch_backup && new_backup != res->backup) { |
1158 | - ||
1159 | if (res->backup) { |
1188 | if (res->backup) { |
1160 | lockdep_assert_held(&res->backup->base.resv->lock.base); |
1189 | lockdep_assert_held(&res->backup->base.resv->lock.base); |
1161 | list_del_init(&res->mob_head); |
1190 | list_del_init(&res->mob_head); |
Line -... | Line 1191... | ||
- | 1191 | vmw_dmabuf_unreference(&res->backup); |
|
1162 | vmw_dmabuf_unreference(&res->backup); |
1192 | } |
1163 | } |
1193 | |
1164 | 1194 | if (new_backup) { |
|
- | 1195 | res->backup = vmw_dmabuf_reference(new_backup); |
|
- | 1196 | lockdep_assert_held(&new_backup->base.resv->lock.base); |
|
1165 | res->backup = vmw_dmabuf_reference(new_backup); |
1197 | list_add_tail(&res->mob_head, &new_backup->res_list); |
- | 1198 | } else { |
|
1166 | lockdep_assert_held(&new_backup->base.resv->lock.base); |
1199 | res->backup = NULL; |
1167 | list_add_tail(&res->mob_head, &new_backup->res_list); |
1200 | } |
Line 1168... | Line 1201... | ||
1168 | } |
1201 | } |
1169 | if (new_backup) |
1202 | if (switch_backup) |
Line 1170... | Line 1203... | ||
1170 | res->backup_offset = new_backup_offset; |
1203 | res->backup_offset = new_backup_offset; |
1171 | 1204 | ||
1172 | if (!res->func->may_evict || res->id == -1) |
1205 | if (!res->func->may_evict || res->id == -1 || res->pin_count) |
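The unreserve path now distinguishes "the backup binding changed" from "there is a new backup buffer": switch_backup controls whether the mob list and backup_offset are touched at all, while new_backup may legitimately be NULL to drop the backup, and a pinned resource (res->pin_count) is never put back on the eviction LRU. Callers therefore fall into a few shapes; a hedged sketch of the call sites this signature implies (not literal lines from the file):

    /* Nothing about the backup changed; just return the resource to the LRU. */
    vmw_resource_unreserve(res, false, NULL, 0);

    /* Command submission switched the resource to a new backup buffer. */
    vmw_resource_unreserve(res, true, new_backup, new_backup_offset);

    /* The backup was dropped entirely (switch_backup true, new_backup NULL). */
    vmw_resource_unreserve(res, true, NULL, 0);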
Line 1204... | Line 1237... | ||
1204 | return ret; |
1237 | return ret; |
1205 | } |
1238 | } |
Line 1206... | Line 1239... | ||
1206 | 1239 | ||
1207 | INIT_LIST_HEAD(&val_list); |
1240 | INIT_LIST_HEAD(&val_list); |
- | 1241 | val_buf->bo = ttm_bo_reference(&res->backup->base); |
|
1208 | val_buf->bo = ttm_bo_reference(&res->backup->base); |
1242 | val_buf->shared = false; |
1209 | list_add_tail(&val_buf->head, &val_list); |
1243 | list_add_tail(&val_buf->head, &val_list); |
1210 | ret = ttm_eu_reserve_buffers(NULL, &val_list); |
1244 | ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL); |
1211 | if (unlikely(ret != 0)) |
1245 | if (unlikely(ret != 0)) |
Line 1212... | Line 1246... | ||
1212 | goto out_no_reserve; |
1246 | goto out_no_reserve; |
1213 | 1247 | ||
Line 1242... | Line 1276... | ||
1242 | * This function takes the resource off the LRU list and make sure |
1276 | * This function takes the resource off the LRU list and make sure |
1243 | * a backup buffer is present for guest-backed resources. However, |
1277 | * a backup buffer is present for guest-backed resources. However, |
1244 | * the buffer may not be bound to the resource at this point. |
1278 | * the buffer may not be bound to the resource at this point. |
1245 | * |
1279 | * |
1246 | */ |
1280 | */ |
1247 | int vmw_resource_reserve(struct vmw_resource *res, bool no_backup) |
1281 | int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, |
- | 1282 | bool no_backup) |
1248 | { |
1283 | { |
1249 | struct vmw_private *dev_priv = res->dev_priv; |
1284 | struct vmw_private *dev_priv = res->dev_priv; |
1250 | int ret; |
1285 | int ret; |
Line 1251... | Line 1286... | ||
1251 | 1286 | ||
1252 | write_lock(&dev_priv->resource_lock); |
1287 | write_lock(&dev_priv->resource_lock); |
1253 | list_del_init(&res->lru_head); |
1288 | list_del_init(&res->lru_head); |
Line 1254... | Line 1289... | ||
1254 | write_unlock(&dev_priv->resource_lock); |
1289 | write_unlock(&dev_priv->resource_lock); |
1255 | 1290 | ||
1256 | if (res->func->needs_backup && res->backup == NULL && |
1291 | if (res->func->needs_backup && res->backup == NULL && |
1257 | !no_backup) { |
1292 | !no_backup) { |
1258 | ret = vmw_resource_buf_alloc(res, true); |
1293 | ret = vmw_resource_buf_alloc(res, interruptible); |
1259 | if (unlikely(ret != 0)) |
1294 | if (unlikely(ret != 0)) { |
- | 1295 | DRM_ERROR("Failed to allocate a backup buffer " |
- | 1296 | "of size %lu. bytes\n", |
- | 1297 | (unsigned long) res->backup_size); |
- | 1298 | return ret; |
Line 1260... | Line 1299... ||
1260 | return ret; |
1299 | } |
1261 | } |
1300 | } |
Line 1262... | Line 1301... | ||
1262 | 1301 | ||
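vmw_resource_reserve() now takes the interruptibility of its waits from the caller instead of hard-coding it, and a failed backup-buffer allocation is reported with an error message before bailing out. A hedged sketch of a reserve/unreserve bracket under the new signatures (the calling context is an assumption; only the two functions are from this revision):

    ret = vmw_resource_reserve(res, true /* interruptible */, false);
    if (ret)
            return ret;

    /* ... validate or otherwise operate on the reserved resource ... */

    vmw_resource_unreserve(res, false, NULL, 0);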
Line 1288... | Line 1327... | ||
1288 | * to a backup buffer. |
1327 | * to a backup buffer. |
1289 | * |
1328 | * |
1290 | * @res: The resource to evict. |
1329 | * @res: The resource to evict. |
1291 | * @interruptible: Whether to wait interruptible. |
1330 | * @interruptible: Whether to wait interruptible. |
1292 | */ |
1331 | */ |
1293 | int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) |
1332 | static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible) |
1294 | { |
1333 | { |
1295 | struct ttm_validate_buffer val_buf; |
1334 | struct ttm_validate_buffer val_buf; |
1296 | const struct vmw_res_func *func = res->func; |
1335 | const struct vmw_res_func *func = res->func; |
1297 | int ret; |
1336 | int ret; |
Line 1298... | Line 1337... | ||
1298 | 1337 | ||
Line 1299... | Line 1338... | ||
1299 | BUG_ON(!func->may_evict); |
1338 | BUG_ON(!func->may_evict); |
- | 1339 | ||
1300 | 1340 | val_buf.bo = NULL; |
|
1301 | val_buf.bo = NULL; |
1341 | val_buf.shared = false; |
1302 | ret = vmw_resource_check_buffer(res, interruptible, &val_buf); |
1342 | ret = vmw_resource_check_buffer(res, interruptible, &val_buf); |
Line 1303... | Line 1343... | ||
1303 | if (unlikely(ret != 0)) |
1343 | if (unlikely(ret != 0)) |
Line 1338... | Line 1378... | ||
1338 | struct vmw_private *dev_priv = res->dev_priv; |
1378 | struct vmw_private *dev_priv = res->dev_priv; |
1339 | struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; |
1379 | struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; |
1340 | struct ttm_validate_buffer val_buf; |
1380 | struct ttm_validate_buffer val_buf; |
1341 | unsigned err_count = 0; |
1381 | unsigned err_count = 0; |
Line 1342... | Line 1382... | ||
1342 | 1382 | ||
1343 | if (likely(!res->func->may_evict)) |
1383 | if (!res->func->create) |
Line 1344... | Line 1384... | ||
1344 | return 0; |
1384 | return 0; |
1345 | 1385 ||
1346 | val_buf.bo = NULL; |
1386 | val_buf.bo = NULL; |
- | 1387 | val_buf.shared = false; |
1347 | if (res->backup) |
1388 | if (res->backup) |
1348 | val_buf.bo = &res->backup->base; |
1389 | val_buf.bo = &res->backup->base; |
1349 | do { |
1390 | do { |
Line 1409... | Line 1450... | ||
1409 | */ |
1450 | */ |
1410 | void vmw_fence_single_bo(struct ttm_buffer_object *bo, |
1451 | void vmw_fence_single_bo(struct ttm_buffer_object *bo, |
1411 | struct vmw_fence_obj *fence) |
1452 | struct vmw_fence_obj *fence) |
1412 | { |
1453 | { |
1413 | struct ttm_bo_device *bdev = bo->bdev; |
1454 | struct ttm_bo_device *bdev = bo->bdev; |
1414 | struct ttm_bo_driver *driver = bdev->driver; |
- | |
1415 | struct vmw_fence_obj *old_fence_obj; |
- | |
- | 1455 | ||
1416 | struct vmw_private *dev_priv = |
1456 | struct vmw_private *dev_priv = |
1417 | container_of(bdev, struct vmw_private, bdev); |
1457 | container_of(bdev, struct vmw_private, bdev); |
Line 1418... | Line 1458... | ||
1418 | 1458 | ||
1419 | if (fence == NULL) |
1459 | if (fence == NULL) { |
1420 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); |
1460 | vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); |
1421 | else |
1461 | reservation_object_add_excl_fence(bo->resv, &fence->base); |
1422 | driver->sync_obj_ref(fence); |
1462 | fence_put(&fence->base); |
1423 | - ||
1424 | spin_lock(&bdev->fence_lock); |
- |
1425 | - ||
1426 | old_fence_obj = bo->sync_obj; |
- |
1427 | bo->sync_obj = fence; |
- |
1428 | - ||
1429 | spin_unlock(&bdev->fence_lock); |
- |
1430 | - ||
1431 | if (old_fence_obj) |
1463 | } else |
1432 | vmw_fence_obj_unreference(&old_fence_obj); |
1464 | reservation_object_add_excl_fence(bo->resv, &fence->base); |
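The rewritten vmw_fence_single_bo() drops the old sync_obj/fence_lock bookkeeping and instead publishes the fence through the buffer's reservation object: the fence is attached as the exclusive fence, and when the function had to create the fence itself it also drops that temporary reference. A condensed sketch of the new flow, restating the new-revision lines above rather than adding anything:

    if (fence == NULL) {
            /* No fence supplied: create one, attach it, drop our reference. */
            vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
            reservation_object_add_excl_fence(bo->resv, &fence->base);
            fence_put(&fence->base);
    } else {
            /* Caller keeps its reference; we only attach the fence. */
            reservation_object_add_excl_fence(bo->resv, &fence->base);
    }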
Line 1433... | Line 1465... | ||
1433 | } |
1465 | } |
1434 | 1466 | ||
1435 | /** |
1467 | /** |
1436 | * vmw_resource_move_notify - TTM move_notify_callback |
1468 | * vmw_resource_move_notify - TTM move_notify_callback |
1437 | * |
1469 | * |
1438 | * @bo: The TTM buffer object about to move. |
1470 | * @bo: The TTM buffer object about to move. |
1439 | * @mem: The truct ttm_mem_reg indicating to what memory |
1471 | * @mem: The struct ttm_mem_reg indicating to what memory |
1440 | * region the move is taking place. |
1472 | * region the move is taking place. |
1441 | * |
1473 | * |
1442 | * Evicts the Guest Backed hardware resource if the backup |
1474 | * Evicts the Guest Backed hardware resource if the backup |
Line 1452... | Line 1484... | ||
1452 | * it is safe to unbind. |
1484 | * it is safe to unbind. |
1453 | */ |
1485 | */ |
1454 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, |
1486 | void vmw_resource_move_notify(struct ttm_buffer_object *bo, |
1455 | struct ttm_mem_reg *mem) |
1487 | struct ttm_mem_reg *mem) |
1456 | { |
1488 | { |
- | 1489 | /** |
- | 1490 | * vmw_query_readback_all - Read back cached query states |
- | 1491 | * |
- | 1492 | * @dx_query_mob: Buffer containing the DX query MOB |
- | 1493 | * |
- | 1494 | * Read back cached states from the device if they exist. This function |
- | 1495 | * assumings binding_mutex is held. |
- | 1496 | */ |
- | 1497 | int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob) |
- | 1498 | { |
- | 1499 | struct vmw_resource *dx_query_ctx; |
- | 1500 | struct vmw_private *dev_priv; |
- | 1501 | struct { |
- | 1502 | SVGA3dCmdHeader header; |
- | 1503 | SVGA3dCmdDXReadbackAllQuery body; |
- | 1504 | } *cmd; |
- | 1505 ||
- | 1506 ||
- | 1507 | /* No query bound, so do nothing */ |
- | 1508 | if (!dx_query_mob || !dx_query_mob->dx_query_ctx) |
- | 1509 | return 0; |
- | 1510 ||
- | 1511 | dx_query_ctx = dx_query_mob->dx_query_ctx; |
- | 1512 | dev_priv = dx_query_ctx->dev_priv; |
- | 1513 ||
- | 1514 | cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id); |
- | 1515 | if (unlikely(cmd == NULL)) { |
- | 1516 | DRM_ERROR("Failed reserving FIFO space for " |
- | 1517 | "query MOB read back.\n"); |
- | 1518 | return -ENOMEM; |
- | 1519 | } |
- | 1520 ||
- | 1521 | cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY; |
- | 1522 | cmd->header.size = sizeof(cmd->body); |
- | 1523 | cmd->body.cid = dx_query_ctx->id; |
- | 1524 ||
- | 1525 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
- | 1526 ||
- | 1527 | /* Triggers a rebind the next time affected context is bound */ |
- | 1528 | dx_query_mob->dx_query_ctx = NULL; |
- | 1529 ||
- | 1530 | return 0; |
- | 1531 | } |
1457 | } |
1532 | } |
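The new vmw_query_readback_all() follows the driver's usual FIFO command pattern: reserve space for a fixed-size command, fill in the SVGA3D header and body, and commit it, clearing the MOB's dx_query_ctx afterwards so the next context bind triggers a rebind. A generic sketch of that reserve/fill/commit shape (the command and helper names are the ones in the added function; ctx_id stands in for dx_query_ctx->id):

    struct {
            SVGA3dCmdHeader header;
            SVGA3dCmdDXReadbackAllQuery body;
    } *cmd;

    cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_id);
    if (!cmd)
            return -ENOMEM;                  /* FIFO space exhausted */

    cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
    cmd->header.size = sizeof(cmd->body);
    cmd->body.cid    = ctx_id;

    vmw_fifo_commit(dev_priv, sizeof(*cmd)); /* command is now queued */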
Line 1458... | Line 1533... | ||
1458 | 1533 | ||
1459 | /** |
1534 | /** |
1460 | * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. |
1535 | * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. |
Line 1532... | Line 1607... | ||
1532 | for (type = 0; type < vmw_res_max; ++type) |
1607 | for (type = 0; type < vmw_res_max; ++type) |
1533 | vmw_resource_evict_type(dev_priv, type); |
1608 | vmw_resource_evict_type(dev_priv, type); |
Line 1534... | Line 1609... | ||
1534 | 1609 | ||
1535 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1610 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
- | 1611 | } |
- | 1612 ||
- | 1613 | /** |
- | 1614 | * vmw_resource_pin - Add a pin reference on a resource |
- | 1615 | * |
- | 1616 | * @res: The resource to add a pin reference on |
- | 1617 | * |
- | 1618 | * This function adds a pin reference, and if needed validates the resource. |
- | 1619 | * Having a pin reference means that the resource can never be evicted, and |
- | 1620 | * its id will never change as long as there is a pin reference. |
- | 1621 | * This function returns 0 on success and a negative error code on failure. |
- | 1622 | */ |
- | 1623 | int vmw_resource_pin(struct vmw_resource *res, bool interruptible) |
- | 1624 | { |
- | 1625 | struct vmw_private *dev_priv = res->dev_priv; |
- | 1626 | int ret; |
- | 1627 ||
- | 1628 | ttm_write_lock(&dev_priv->reservation_sem, interruptible); |
- | 1629 | mutex_lock(&dev_priv->cmdbuf_mutex); |
- | 1630 | ret = vmw_resource_reserve(res, interruptible, false); |
- | 1631 | if (ret) |
- | 1632 | goto out_no_reserve; |
- | 1633 ||
- | 1634 | if (res->pin_count == 0) { |
- | 1635 | struct vmw_dma_buffer *vbo = NULL; |
- | 1636 ||
- | 1637 | if (res->backup) { |
- | 1638 | vbo = res->backup; |
- | 1639 ||
- | 1640 | ttm_bo_reserve(&vbo->base, interruptible, false, false, |
- | 1641 | NULL); |
- | 1642 | if (!vbo->pin_count) { |
- | 1643 | ret = ttm_bo_validate |
- | 1644 | (&vbo->base, |
- | 1645 | res->func->backup_placement, |
- | 1646 | interruptible, false); |
- | 1647 | if (ret) { |
- | 1648 | ttm_bo_unreserve(&vbo->base); |
- | 1649 | goto out_no_validate; |
- | 1650 | } |
- | 1651 | } |
- | 1652 ||
- | 1653 | /* Do we really need to pin the MOB as well? */ |
- | 1654 | vmw_bo_pin_reserved(vbo, true); |
- | 1655 | } |
- | 1656 | ret = vmw_resource_validate(res); |
- | 1657 | if (vbo) |
- | 1658 | ttm_bo_unreserve(&vbo->base); |
- | 1659 | if (ret) |
- | 1660 | goto out_no_validate; |
- | 1661 | } |
- | 1662 | res->pin_count++; |
- | 1663 ||
- | 1664 | out_no_validate: |
- | 1665 | vmw_resource_unreserve(res, false, NULL, 0UL); |
- | 1666 | out_no_reserve: |
- | 1667 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
- | 1668 | ttm_write_unlock(&dev_priv->reservation_sem); |
- | 1669 ||
- | 1670 | return ret; |
- | 1671 | } |
- | 1672 ||
- | 1673 | /** |
- | 1674 | * vmw_resource_unpin - Remove a pin reference from a resource |
- | 1675 | * |
- | 1676 | * @res: The resource to remove a pin reference from |
- | 1677 | * |
- | 1678 | * Having a pin reference means that the resource can never be evicted, and |
- | 1679 | * its id will never change as long as there is a pin reference. |
- | 1680 | */ |
- | 1681 | void vmw_resource_unpin(struct vmw_resource *res) |
- | 1682 | { |
- | 1683 | struct vmw_private *dev_priv = res->dev_priv; |
- | 1684 | int ret; |
- | 1685 ||
- | 1686 | ttm_read_lock(&dev_priv->reservation_sem, false); |
- | 1687 | mutex_lock(&dev_priv->cmdbuf_mutex); |
- | 1688 ||
- | 1689 | ret = vmw_resource_reserve(res, false, true); |
- | 1690 | WARN_ON(ret); |
- | 1691 ||
- | 1692 | WARN_ON(res->pin_count == 0); |
- | 1693 | if (--res->pin_count == 0 && res->backup) { |
- | 1694 | struct vmw_dma_buffer *vbo = res->backup; |
- | 1695 ||
- | 1696 | ttm_bo_reserve(&vbo->base, false, false, false, NULL); |
- | 1697 | vmw_bo_pin_reserved(vbo, false); |
- | 1698 | ttm_bo_unreserve(&vbo->base); |
- | 1699 | } |
- | 1700 ||
- | 1701 | vmw_resource_unreserve(res, false, NULL, 0UL); |
- | 1702 ||
- | 1703 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
- | 1704 | ttm_read_unlock(&dev_priv->reservation_sem); |
- | 1705 | } |
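vmw_resource_pin() and vmw_resource_unpin() give callers a way to keep a resource permanently validated: while the pin count is non-zero the resource is never evicted and its id stays stable, which the unreserve path earlier in this revision relies on (a pinned resource is never put back on the eviction LRU). A hedged usage sketch; the calling context is an assumption, only the two functions themselves come from this revision:

    /* Keep the resource resident, e.g. while it is being scanned out. */
    ret = vmw_resource_pin(res, true /* interruptible */);
    if (ret)
            return ret;

    /* ... resource is guaranteed to stay validated here ... */

    /* Drop the pin; the resource becomes evictable again. */
    vmw_resource_unpin(res);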
- | 1706 ||
- | 1707 | /** |
- | 1708 | * vmw_res_type - Return the resource type |
- | 1709 | * |
- | 1710 | * @res: Pointer to the resource |
- | 1711 | */ |
- | 1712 | enum vmw_res_type vmw_res_type(const struct vmw_resource *res) |
- | 1713 | { |
- | 1714 | return res->func->res_type