/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff this is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */

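/*
 * A minimal usage sketch (illustrative only, not part of the original file;
 * my_buffer, my_buffer_alloc and my_buffer_free are made-up names). Drivers
 * typically embed a drm_mm_node in their own object and let drm_mm find a
 * hole for it:
 *
 *	struct my_buffer {
 *		struct drm_mm_node node;	// must be cleared to 0 first
 *	};
 *
 *	static int my_buffer_alloc(struct drm_mm *mm, struct my_buffer *buf,
 *				   unsigned long size)
 *	{
 *		// bottom-up first-fit search, no alignment or color constraint
 *		return drm_mm_insert_node_generic(mm, &buf->node, size, 0, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 *
 *	static void my_buffer_free(struct my_buffer *buf)
 *	{
 *		drm_mm_remove_node(&buf->node);	// node is reusable afterwards
 *	}
 */
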
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long color,
                                                enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long color,
                                                unsigned long start,
                                                unsigned long end,
                                                enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
                                 unsigned long color,
                                 enum drm_mm_allocator_flags flags)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (flags & DRM_MM_CREATE_TOP)
                adj_start = adj_end - size;

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp) {
                        if (flags & DRM_MM_CREATE_TOP)
                                adj_start -= tmp;
                        else
                                adj_start += alignment - tmp;
                }
        }

        BUG_ON(adj_start < hole_start);
        BUG_ON(adj_end > hole_end);

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set up
 * before the range allocator can be set up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
        struct drm_mm_node *hole;
        unsigned long end = node->start + node->size;
        unsigned long hole_start;
        unsigned long hole_end;

        BUG_ON(node == NULL);

        /* Find the relevant hole to add our node to */
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
                if (hole_start > node->start || hole_end < end)
                        continue;

                node->mm = mm;
                node->allocated = 1;

                INIT_LIST_HEAD(&node->hole_stack);
                list_add(&node->node_list, &hole->node_list);

                if (node->start == hole_start) {
                        hole->hole_follows = 0;
                        list_del_init(&hole->hole_stack);
                }

                node->hole_follows = 0;
                if (end != hole_end) {
                        list_add(&node->hole_stack, &mm->hole_stack);
                        node->hole_follows = 1;
                }

                return 0;
        }

        return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

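/*
 * A reservation sketch (illustrative only; fb_base, fb_size and fb_node are
 * assumptions, not defined in this file). Taking over a firmware framebuffer
 * might look roughly like this:
 *
 *	struct drm_mm_node fb_node;
 *
 *	memset(&fb_node, 0, sizeof(fb_node));	// color stays 0
 *	fb_node.start = fb_base;		// offset reported by firmware
 *	fb_node.size = fb_size;
 *	if (drm_mm_reserve_node(mm, &fb_node))
 *		// range already in use: fall back to a fresh allocation
 */
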
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
                               unsigned long color,
                               enum drm_mm_search_flags sflags,
                               enum drm_mm_allocator_flags aflags)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, sflags);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long color,
                                       unsigned long start, unsigned long end,
                                       enum drm_mm_allocator_flags flags)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (adj_start < start)
                adj_start = start;
        if (adj_end > end)
                adj_end = end;

        if (flags & DRM_MM_CREATE_TOP)
                adj_start = adj_end - size;

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp) {
                        if (flags & DRM_MM_CREATE_TOP)
                                adj_start -= tmp;
                        else
                                adj_start += alignment - tmp;
                }
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start < start);
        BUG_ON(node->start < adj_start);
        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment,
                                        unsigned long color,
                                        unsigned long start, unsigned long end,
                                        enum drm_mm_search_flags sflags,
                                        enum drm_mm_allocator_flags aflags)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, sflags);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end, aflags);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        if (WARN_ON(!node->allocated))
                return;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(__drm_mm_hole_node_start(node) ==
                       __drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(__drm_mm_hole_node_start(node) !=
                       __drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/* Check whether a hole [start, end) can fit an allocation of @size bytes
 * at the given @alignment. */
static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }

        return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                                      unsigned long size,
                                                      unsigned alignment,
                                                      unsigned long color,
                                                      enum drm_mm_search_flags flags)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long adj_start;
        unsigned long adj_end;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                               flags & DRM_MM_SEARCH_BELOW) {
                unsigned long hole_size = adj_end - adj_start;

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;

                if (hole_size < best_size) {
                        best = entry;
                        best_size = hole_size;
                }
        }

        return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                               unsigned long size,
                                                               unsigned alignment,
                                                               unsigned long color,
                                                               unsigned long start,
                                                               unsigned long end,
                                                               enum drm_mm_search_flags flags)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long adj_start;
        unsigned long adj_end;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                               flags & DRM_MM_SEARCH_BELOW) {
                unsigned long hole_size = adj_end - adj_start;

                if (adj_start < start)
                        adj_start = start;
                if (adj_end > end)
                        adj_end = end;

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;

                if (hole_size < best_size) {
                        best = entry;
                        best_size = hole_size;
                }
        }

        return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which needs
 * to be walked before a scan operation even begins this is linear in the
 * number of objects. It doesn't seem to hurt badly.
 */

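/*
 * A rough eviction sketch (illustrative only; the lru list, evict_list and
 * the evict_link/lru_link members of "obj" are assumptions, not part of this
 * file):
 *
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// Every scanned block must be removed again, in reverse scan order.
 *	// Since list_add() prepends, walking evict_list forward does that.
 *	list_for_each_entry_safe(obj, tmp, &evict_list, evict_link) {
 *		if (!drm_mm_scan_remove_block(&obj->node))
 *			list_del(&obj->evict_link);	// not needed for hole
 *	}
 *
 *	// If found, evict everything left on evict_list, then allocate.
 */
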
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start, adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
        adj_end = hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color,
                                 &adj_start, &adj_end);

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_end = hole_end;
                return true;
        }

        return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        list_add(&node->node_list, &prev_node->node_list);

        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
                node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        mm->scanned_blocks = 0;

        /* Clever trick to avoid a special case in the free hole tracking:
         * head_node is a dummy node placed at the very end of the range
         * whose size wraps around, so the hole that follows it spans the
         * entire managed range [start, start + size). */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

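/*
 * An init/teardown sketch (illustrative only; "mgr" and the range values are
 * made up):
 *
 *	static struct drm_mm mgr;
 *
 *	memset(&mgr, 0, sizeof(mgr));		// required before drm_mm_init()
 *	drm_mm_init(&mgr, 0, 16 * 1024 * 1024);	// manage a 16 MiB range
 *	// ... insert, reserve and remove nodes ...
 *	drm_mm_takedown(&mgr);			// all nodes must be removed first
 */
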
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
        WARN(!list_empty(&mm->head_node.node_list),
             "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
                                       const char *prefix)
{
        unsigned long hole_start, hole_end, hole_size;

        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
                return hole_size;
        }

        return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_debug_hole(&mm->head_node, prefix);

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;
                total_free += drm_mm_debug_hole(entry, prefix);
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
        unsigned long hole_start, hole_end, hole_size;

        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
                return hole_size;
        }

        return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_dump_hole(m, &mm->head_node);

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;
                total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;

        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif