
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it lives in the DRM core, which means it is easier to
 * extend for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any allocations of its own, so
 * if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted at any time after the allocator is initialized, which helps avoid
 * looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep cliff anyway this is not a real concern. Removing a node
 * again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */

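/*
 * Example (an illustrative sketch, not part of the original file): the
 * minimal lifecycle of an embedded node. "struct my_buffer", my_buffer_pin()
 * and the 4096-byte size/alignment are assumptions made up for this sketch.
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// must be cleared to 0
 *	};
 *
 *	int my_buffer_pin(struct drm_mm *mm, struct my_buffer *buf)
 *	{
 *		// search bottom-up for the first hole that fits 4096 bytes
 *		return drm_mm_insert_node_generic(mm, &buf->node,
 *						  4096, 4096, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 *
 *	void my_buffer_unpin(struct my_buffer *buf)
 *	{
 *		drm_mm_remove_node(&buf->node);	// O(1)
 *	}
 */
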
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set up before the range allocator can be set up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

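/*
 * Example (an illustrative sketch, not part of the original file): taking
 * over a firmware framebuffer at driver load. "fb_base" and "fb_size" are
 * assumed to have been read from the firmware's configuration.
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	int ret;
 *
 *	if (!node)
 *		return -ENOMEM;
 *	node->start = fb_base;
 *	node->size = fb_size;
 *	ret = drm_mm_reserve_node(mm, node);
 *	if (ret) {	// -ENOSPC: the range is no longer free
 *		kfree(node);
 *		return ret;
 *	}
 */
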
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

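/*
 * Example (an illustrative sketch, not part of the original file): placing an
 * allocation in the bottom 256 MiB of the managed range, e.g. for hardware
 * that can only address low offsets. The 256 MiB bound is an assumption made
 * up for this sketch.
 *
 *	ret = drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
 *						  0, 256ULL << 20,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */
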
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/* Check whether the hole [start, end) can fit an allocation of @size bytes
 * at the requested alignment; returns non-zero if it fits. */
static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

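/*
 * Example (an illustrative sketch, not part of the original file): migrating
 * an allocation between two embedding objects; "old_buf" and "new_buf" are
 * assumptions made up for this sketch.
 *
 *	drm_mm_replace_node(&old_buf->node, &new_buf->node);
 *	// new_buf->node now owns the range; old_buf->node is unallocated
 */
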
/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we'd evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object. An example loop is sketched below.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So, like the free stack which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt too badly.
 */

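/*
 * Example (an illustrative sketch, not part of the original file): an
 * eviction loop built on the scan interface. "struct my_obj" with its "node",
 * "lru_link" and "scan_link" members, the "lru" list and my_evict() are all
 * assumptions made up for this sketch.
 *
 *	LIST_HEAD(evict_list);
 *	struct my_obj *obj, *tmp;
 *	bool found = false;
 *
 *	drm_mm_init_scan(mm, size, alignment, color);
 *
 *	// 1) Feed objects into the roster, oldest first, until a hole opens.
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &evict_list);	// head insertion
 *		if (drm_mm_scan_add_block(&obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// 2) Walk evict_list front to back: because of the head insertion
 *	//    above this removes blocks in exactly the reverse order they
 *	//    were added, as required. Objects that don't overlap the found
 *	//    hole are dropped from the list and survive.
 *	list_for_each_entry_safe(obj, tmp, &evict_list, scan_link) {
 *		if (!drm_mm_scan_remove_block(&obj->node))
 *			list_del(&obj->scan_link);
 *	}
 *
 *	// 3) If a hole was found, evict what remains on evict_list and
 *	//    retry the real allocation.
 *	if (found)
 *		list_for_each_entry_safe(obj, tmp, &evict_list, scan_link)
 *			my_evict(obj);
 */
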
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed from the scan list in exactly the reverse order
 * they have been added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking:
	 * the head node is a sentinel, and the "hole" that follows it spans
	 * the entire managed range [start, start + size). */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif