Subversion Repositories Kolibri OS


/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

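/* Flag a vma as an eviction candidate: add it to the caller's unwind list and
 * register its node with the drm_mm eviction scan. Pinned vmas and vmas that
 * are already tracked on an exec_list are skipped.
 */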
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
        if (vma->pin_count)
                return false;

        if (WARN_ON(!list_empty(&vma->exec_list)))
                return false;

        list_add(&vma->exec_list, unwind);
        return drm_mm_scan_add_block(&vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it
 * ignores only pinned vmas, not objects whose backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
                         int min_size, unsigned alignment, unsigned cache_level,
                         unsigned long start, unsigned long end,
                         unsigned flags)
{
        struct list_head eviction_list, unwind_list;
        struct i915_vma *vma;
        int ret = 0;
        int pass = 0;

        trace_i915_gem_evict(dev, min_size, alignment, flags);

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the (per
         * ring) active list that do not have an outstanding flush. Once the
         * hardware reports completion (the seqno is updated after the
         * batchbuffer has been finished) the clean buffer objects would
         * be retired to the inactive list. Any dirty objects would be added
         * to the tail of the flushing list. So after processing the clean
         * active objects we need to emit a MI_FLUSH to retire the flushing
         * list, hence the retirement order of the flushing list is in
         * advance of the dirty objects on the active lists.
         *
         * The retirement sequence is thus:
         *   1. Inactive objects (already retired)
         *   2. Clean active objects
         *   3. Flushing list
         *   4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */

        INIT_LIST_HEAD(&unwind_list);
        if (start != 0 || end != vm->total) {
                drm_mm_init_scan_with_range(&vm->mm, min_size,
                                            alignment, cache_level,
                                            start, end);
        } else
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

search_again:
        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(vma, &vm->inactive_list, vm_link) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

        if (flags & PIN_NONBLOCK)
                goto none;

        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(vma, &vm->active_list, vm_link) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

none:
        /* Nothing found, clean up and bail out! */
        while (!list_empty(&unwind_list)) {
                vma = list_first_entry(&unwind_list,
                                       struct i915_vma,
                                       exec_list);
                ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);

                list_del_init(&vma->exec_list);
        }

        /* Can we unpin some objects such as idle hw contexts,
         * or pending flips?
         */
        if (flags & PIN_NONBLOCK)
                return -ENOSPC;

        /* Only idle the GPU and repeat the search once */
        if (pass++ == 0) {
                ret = i915_gpu_idle(dev);
                if (ret)
                        return ret;

                i915_gem_retire_requests(dev);
                goto search_again;
        }

        /* If we still have pending pageflip completions, drop
         * back to userspace to give our workqueues time to
         * acquire our locks and unpin the old scanouts.
         */
        return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
        /* drm_mm doesn't allow any other operations while
         * scanning, therefore store the objects to be evicted on a
         * temporary list. */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
                vma = list_first_entry(&unwind_list,
                                       struct i915_vma,
                                       exec_list);
                if (drm_mm_scan_remove_block(&vma->node)) {
                        list_move(&vma->exec_list, &eviction_list);
                        drm_gem_object_reference(&vma->obj->base);
                        continue;
                }
                list_del_init(&vma->exec_list);
        }

        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
                struct drm_gem_object *obj;
                vma = list_first_entry(&eviction_list,
                                       struct i915_vma,
                                       exec_list);

                obj = &vma->obj->base;
                list_del_init(&vma->exec_list);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);

                drm_gem_object_unreference(obj);
        }

        return ret;
}
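
/* Illustrative sketch only: the typical caller in the object/vma binding code
 * first tries to carve out a node and, on failure, calls
 * i915_gem_evict_something() and retries. The helper names and flags below
 * follow the binding code of this kernel era and are an approximation, not
 * part of this file:
 *
 *      search_free:
 *              ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *                                                        size, alignment,
 *                                                        obj->cache_level,
 *                                                        start, end,
 *                                                        DRM_MM_SEARCH_DEFAULT,
 *                                                        DRM_MM_CREATE_DEFAULT);
 *              if (ret) {
 *                      ret = i915_gem_evict_something(dev, vm, size, alignment,
 *                                                     obj->cache_level,
 *                                                     start, end, flags);
 *                      if (ret == 0)
 *                              goto search_free;
 *              }
 */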
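/**
 * i915_gem_evict_for_vma - Evict vmas overlapping a reserved range
 * @target: vma whose node describes the range to clear
 *
 * Walk the address space and unbind any vma whose node overlaps the range
 * already reserved in @target->node. Overlapping vmas that are pinned cannot
 * be evicted: -EBUSY is returned if the vma is pinned for some other use,
 * -EINVAL if two fixed objects in the same batch overlap, and -ENOSPC
 * otherwise.
 */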
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
        struct drm_mm_node *node, *next;

        list_for_each_entry_safe(node, next,
                        &target->vm->mm.head_node.node_list,
                        node_list) {
                struct i915_vma *vma;
                int ret;

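                /* Nodes are kept in address order, so skip everything that
                 * ends before the target range and stop once we are past it.
                 */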
                if (node->start + node->size <= target->node.start)
                        continue;
                if (node->start >= target->node.start + target->node.size)
                        break;

                vma = container_of(node, typeof(*vma), node);

                if (vma->pin_count) {
                        if (!vma->exec_entry || (vma->pin_count > 1))
                                /* Object is pinned for some other use */
                                return -EBUSY;

                        /* We need to evict a buffer in the same batch */
                        if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
                                /* Overlapping fixed objects in the same batch */
                                return -EINVAL;

                        return -ENOSPC;
                }

                ret = i915_vma_unbind(vma);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
        struct i915_vma *vma, *next;
        int ret;

        WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
        trace_i915_gem_evict_vm(vm);

        if (do_idle) {
                ret = i915_gpu_idle(vm->dev);
                if (ret)
                        return ret;

                i915_gem_retire_requests(vm->dev);

                WARN_ON(!list_empty(&vm->active_list));
        }

        list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
                if (vma->pin_count == 0)
                        WARN_ON(i915_vma_unbind(vma));

        return 0;
}