
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kzalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kzalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child = list_entry(mm->unused_nodes.next,
                                           struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

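/*
 * Editorial sketch (not part of the original file): how drm_mm_pre_get()
 * is meant to be used. Topping up mm->unused_nodes first lets the atomic
 * path of drm_mm_kmalloc() succeed later without a GFP_KERNEL allocation,
 * e.g. while a caller-owned spinlock is held. The lock and function name
 * below are hypothetical.
 */
static struct drm_mm_node *example_get_block_atomic(struct drm_mm *mm,
                                                    spinlock_t *dev_lock,
                                                    unsigned long size)
{
        struct drm_mm_node *hole, *node = NULL;

        if (drm_mm_pre_get(mm))         /* refill the unused_nodes cache */
                return NULL;            /* -ENOMEM */

        spin_lock(dev_lock);
        hole = drm_mm_search_free(mm, size, 0, false);
        if (hole)                       /* atomic = 1: no sleeping allocation */
                node = drm_mm_get_block_generic(hole, size, 0, 0, 1);
        spin_unlock(dev_lock);

        return node;
}
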
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);

        return next_node->start;
}

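/*
 * Editorial note: free space is tracked implicitly. A node with
 * hole_follows set owns the gap between its own end
 * (drm_mm_hole_node_start()) and the start of the next node on node_list
 * (drm_mm_hole_node_end()); every such node is additionally chained on
 * mm->hole_stack, which is what the search functions below walk.
 */
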
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
                                 unsigned long color)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

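/*
 * Editorial worked example for the alignment round-up above (values are
 * hypothetical): with adj_start = 0x1003 and alignment = 0x100,
 * tmp = 0x1003 % 0x100 = 0x3, so adj_start += 0x100 - 0x3, yielding the
 * aligned offset 0x1100.
 */
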
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             unsigned long color,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
                       unsigned long size, unsigned alignment)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free(mm, size, alignment, false);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, 0);

        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);

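/*
 * Editorial sketch (not part of the original file): typical use of the
 * embedded-node API. "struct my_buffer" is a hypothetical caller object
 * that embeds its drm_mm_node, avoiding the separate allocation done by
 * the drm_mm_get_block_generic() path.
 */
struct my_buffer {
        struct drm_mm_node mm_node;     /* must be zeroed before insertion */
        void *data;
};

static int example_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
                               unsigned long size, unsigned alignment)
{
        int ret = drm_mm_insert_node(mm, &buf->mm_node, size, alignment);
        if (ret)
                return ret;     /* -ENOSPC: no suitable hole */

        /* buf->mm_node.start now holds the allocated offset. */
        return 0;
}

static void example_buffer_unbind(struct my_buffer *buf)
{
        /* The node's memory stays with buf; only the range is released. */
        drm_mm_remove_node(&buf->mm_node);
}
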
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long color,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (adj_start < start)
                adj_start = start;

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                   unsigned long size,
                                                   unsigned alignment,
                                                   unsigned long color,
                                                   unsigned long start,
                                                   unsigned long end,
                                                   int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
                                   start, end);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long start, unsigned long end)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range(mm, size, alignment,
                                                start, end, false);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
                                   start, end);

        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

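/*
 * Editorial sketch: a range-restricted allocation, e.g. placing a
 * scanout buffer in the first 256 MiB of an aperture. The bounds and the
 * 4 KiB alignment are hypothetical.
 */
static int example_pin_low(struct drm_mm *mm, struct drm_mm_node *node,
                           unsigned long size)
{
        return drm_mm_insert_node_in_range(mm, node, size, 4096,
                                           0, 256UL << 20);
}
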
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(drm_mm_hole_node_start(node)
                                == drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(drm_mm_hole_node_start(node)
                                != drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                kfree(node);
        spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }

        return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                               unsigned long size,
                                               unsigned alignment,
                                               unsigned long color,
                                               bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
                                                        bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

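/*
 * Editorial sketch: moving bookkeeping into a fresh embedded node, e.g.
 * when the owning object is being reallocated. "new_node" must be unused;
 * "old" can afterwards be reused or freed by the caller.
 */
static void example_migrate(struct drm_mm_node *old,
                            struct drm_mm_node *new_node)
{
        drm_mm_replace_node(old, new_node);
        /* new_node now covers the same [start, start + size) range. */
}
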
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start;
        unsigned long adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        hole_start = drm_mm_hole_node_start(prev_node);
        hole_end = drm_mm_hole_node_end(prev_node);

        adj_start = hole_start;
        adj_end = hole_end;

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                /* Store the hole's size, not its end offset, so that the
                 * containment check in drm_mm_scan_remove_block() works. */
                mm->scan_hit_size = hole_end - hole_start;

                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed from the scan list in exactly the reverse order in
 * which they were added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        INIT_LIST_HEAD(&node->node_list);
        list_add(&node->node_list, &prev_node->node_list);

        /* Only need to check for containment because start & size of the
         * complete resulting free block (not just the desired part) are
         * stored. */
        if (node->start >= mm->scan_hit_start &&
            node->start + node->size
                        <= mm->scan_hit_start + mm->scan_hit_size) {
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

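/*
 * Editorial sketch (not part of the original file): the eviction-scan
 * usage these helpers are designed for, modeled on LRU eviction in DRM
 * drivers. "struct my_evict_buffer" (with its mm_node, lru_link and
 * scan_link members) and the caller's "lru" list are hypothetical.
 */
struct my_evict_buffer {
        struct drm_mm_node mm_node;
        struct list_head lru_link;      /* position on the caller's LRU */
        struct list_head scan_link;     /* temporary scan bookkeeping */
};

static int example_evict_for(struct drm_mm *mm, struct list_head *lru,
                             unsigned long size, unsigned alignment)
{
        struct my_evict_buffer *buf, *tmp;
        LIST_HEAD(scan_list);
        int found = 0;

        drm_mm_init_scan(mm, size, alignment, 0);

        /* Feed LRU blocks to the scanner until it reports a usable hole. */
        list_for_each_entry(buf, lru, lru_link) {
                list_add(&buf->scan_link, &scan_list);
                if (drm_mm_scan_add_block(&buf->mm_node)) {
                        found = 1;
                        break;
                }
        }

        /*
         * Unwind in reverse order of addition (scan_list is LIFO because
         * of the list_add above); evict only the blocks flagged by the scan.
         */
        list_for_each_entry_safe(buf, tmp, &scan_list, scan_link) {
                int evict = drm_mm_scan_remove_block(&buf->mm_node);
                list_del_init(&buf->scan_link);
                if (evict)
                        drm_mm_remove_node(&buf->mm_node);
        }

        return found ? 0 : -ENOSPC;
}
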
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_lock_init(&mm->unused_lock);

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->color_adjust = NULL;

        return 0;
}
EXPORT_SYMBOL(drm_mm_init);

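/*
 * Editorial note on the "clever trick" above: head_node.size is the
 * negative of the managed size (unsigned wrap-around), so
 * drm_mm_hole_node_start(&mm->head_node) == start, and after init the
 * entire managed range appears as one hole following head_node. Below, a
 * hypothetical lifecycle sketch (not part of the original file):
 */
static int example_setup(struct drm_mm *mm)
{
        int ret;

        /* Manage a hypothetical 16 MiB range starting at offset 0. */
        ret = drm_mm_init(mm, 0, 16UL << 20);
        if (ret)
                return ret;

        /* ... insert and remove nodes ... */

        if (drm_mm_clean(mm))
                drm_mm_takedown(mm);    /* also frees the unused_nodes cache */
        return 0;
}
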
void drm_mm_takedown(struct drm_mm *mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);