/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/* Stub for the KolibriOS port: ww_mutex support is compiled out, so the
 * acquire context is an empty placeholder and the ww_mutex/ww_acquire
 * calls throughout this file are commented out. */
struct ww_acquire_ctx {};

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

static void ttm_eu_backoff_reservation_locked(struct list_head *list,
                                              struct ww_acquire_ctx *ticket)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                entry->reserved = false;
                if (entry->removed) {
                        ttm_bo_add_to_lru(bo);
                        entry->removed = false;
                }
//              ww_mutex_unlock(&bo->resv->lock);
        }
}

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
                if (!entry->reserved)
                        continue;

                if (!entry->removed) {
                        entry->put_count = ttm_bo_del_from_lru(bo);
                        entry->removed = true;
                }
        }
}

static void ttm_eu_list_ref_sub(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (entry->put_count) {
                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
                        entry->put_count = 0;
                }
        }
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                struct list_head *list)
{
        struct ttm_validate_buffer *entry;
        struct ttm_bo_global *glob;

        if (list_empty(list))
                return;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
        spin_lock(&glob->lru_lock);
        ttm_eu_backoff_reservation_locked(list, ticket);
//      ww_acquire_fini(ticket);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list)
{
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;

        if (list_empty(list))
                return 0;

        list_for_each_entry(entry, list, head) {
                entry->reserved = false;
                entry->put_count = 0;
                entry->removed = false;
        }

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

//      ww_acquire_init(ticket, &reservation_ww_class);
retry:
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                /* already slowpath reserved? */
                if (entry->reserved)
                        continue;

                ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);

                if (ret == -EDEADLK) {
                        /* uh oh, we lost out, drop every reservation and try
                         * to only reserve this buffer, then start over if
                         * this succeeds.
                         */
                        spin_lock(&glob->lru_lock);
                        ttm_eu_backoff_reservation_locked(list, ticket);
                        spin_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
//                      ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
//                                                             ticket);
                        /* Note: with the slowpath lock stubbed out in this
                         * port, ret still holds -EDEADLK here, so the whole
                         * reservation fails instead of retrying. */
                        if (unlikely(ret != 0)) {
                                if (ret == -EINTR)
                                        ret = -ERESTARTSYS;
                                goto err_fini;
                        }

                        entry->reserved = true;
                        if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                                ret = -EBUSY;
                                goto err;
                        }
                        goto retry;
                } else if (ret)
                        goto err;

                entry->reserved = true;
                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        ret = -EBUSY;
                        goto err;
                }
        }

//      ww_acquire_done(ticket);
        spin_lock(&glob->lru_lock);
        ttm_eu_del_from_lru_locked(list);
        spin_unlock(&glob->lru_lock);
        ttm_eu_list_ref_sub(list);
        return 0;

err:
        spin_lock(&glob->lru_lock);
        ttm_eu_backoff_reservation_locked(list, ticket);
        spin_unlock(&glob->lru_lock);
        ttm_eu_list_ref_sub(list);
err_fini:
//      ww_acquire_done(ticket);
//      ww_acquire_fini(ticket);
        return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

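/*
 * Usage sketch (added for illustration, not part of the original file):
 * the typical reserve -> validate -> fence flow these helpers support in
 * this version of TTM. The driver-side pieces (my_driver_submit(),
 * my_driver_fence_create(), how val_list is populated) are hypothetical.
 *
 *      struct ww_acquire_ctx ticket;
 *      struct list_head val_list;      // of struct ttm_validate_buffer
 *      void *fence;
 *      int ret;
 *
 *      ret = ttm_eu_reserve_buffers(&ticket, &val_list);
 *      if (ret)
 *              return ret;             // nothing stays reserved on error
 *
 *      ret = my_driver_submit(&val_list);      // hypothetical
 *      if (ret) {
 *              // Drop all reservations and put buffers back on the LRU.
 *              ttm_eu_backoff_reservation(&ticket, &val_list);
 *              return ret;
 *      }
 *
 *      fence = my_driver_fence_create();       // hypothetical sync object
 *      // Attach the fence to every buffer, return the buffers to the LRU
 *      // and release the reservations in one step.
 *      ttm_eu_fence_buffer_objects(&ticket, &val_list, fence);
 */
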
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                                 struct list_head *list, void *sync_obj)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;

        if (list_empty(list))
                return;

        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        spin_lock(&glob->lru_lock);
        spin_lock(&bdev->fence_lock);

        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                entry->old_sync_obj = bo->sync_obj;
                bo->sync_obj = driver->sync_obj_ref(sync_obj);
                ttm_bo_add_to_lru(bo);
//              ww_mutex_unlock(&bo->resv->lock);
                entry->reserved = false;
        }
        spin_unlock(&bdev->fence_lock);
        spin_unlock(&glob->lru_lock);
//      ww_acquire_fini(ticket);

        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
                        driver->sync_obj_unref(&entry->old_sync_obj);
        }
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);