/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

enum drm_mm_search_flags {
        DRM_MM_SEARCH_DEFAULT =         0,
        DRM_MM_SEARCH_BEST =            1 << 0,
        DRM_MM_SEARCH_BELOW =           1 << 1,
};

enum drm_mm_allocator_flags {
        DRM_MM_CREATE_DEFAULT =         0,
        DRM_MM_CREATE_TOP =             1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

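/*
 * Usage note (illustrative, not part of the original header): DRM_MM_BOTTOMUP
 * and DRM_MM_TOPDOWN each expand to a search flag plus an allocator flag, so
 * a single macro can fill both the sflags and aflags parameters of the
 * *_generic() helpers declared below, e.g.
 *
 *      ret = drm_mm_insert_node_generic(mm, node, size, align, 0,
 *                                       DRM_MM_TOPDOWN);
 *
 * which is equivalent to passing DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP.
 */
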
struct drm_mm_node {
        struct list_head node_list;
        struct list_head hole_stack;
        unsigned hole_follows : 1;
        unsigned scanned_block : 1;
        unsigned scanned_prev_free : 1;
        unsigned scanned_next_free : 1;
        unsigned scanned_preceeds_hole : 1;
        unsigned allocated : 1;
        unsigned long color;
        unsigned long start;
        unsigned long size;
        struct drm_mm *mm;
};

struct drm_mm {
        /* List of all memory nodes that immediately precede a free hole. */
        struct list_head hole_stack;
        /* head_node.node_list is the list of all memory nodes, ordered
         * according to the (increasing) start address of the memory node. */
        struct drm_mm_node head_node;
        unsigned int scan_check_range : 1;
        unsigned scan_alignment;
        unsigned long scan_color;
        unsigned long scan_size;
        unsigned long scan_hit_start;
        unsigned long scan_hit_end;
        unsigned scanned_blocks;
        unsigned long scan_start;
        unsigned long scan_end;
        struct drm_mm_node *prev_scanned_node;

        void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
                             unsigned long *start, unsigned long *end);
};

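/*
 * Usage sketch (illustrative, not part of the original header): a driver
 * typically embeds one struct drm_mm per managed address space and one
 * struct drm_mm_node per object placed in it. The allocator is set up with
 * drm_mm_init() and torn down with drm_mm_takedown(), both declared below.
 * dev_priv, vram_mm and vram_pages are hypothetical driver names; sizes are
 * in whatever units the driver chooses:
 *
 *      struct my_device {
 *              struct drm_mm vram_mm;
 *      };
 *
 *      drm_mm_init(&dev_priv->vram_mm, 0, vram_pages);
 *      ...
 *      drm_mm_takedown(&dev_priv->vram_mm);
 */
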
/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
{
        return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(struct drm_mm *mm)
{
        return mm->hole_stack.next;
}

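/*
 * Example (illustrative sketch, not part of the original header): these
 * helpers are commonly used in teardown paths to avoid touching objects that
 * were never inserted into the allocator, e.g.
 *
 *      if (drm_mm_node_allocated(node))
 *              drm_mm_remove_node(node);
 */
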
static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        BUG_ON(!hole_node->hole_follows);
        return __drm_mm_hole_node_start(hole_node);
}

static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        return list_entry(hole_node->node_list.next,
                          struct drm_mm_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
                                                &(mm)->head_node.node_list, \
                                                node_list)

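/*
 * Example (illustrative sketch, not part of the original header): a minimal
 * driver-specific debug dumper that walks all allocated nodes and, for each
 * node followed by a hole, reports that hole as well. mm is assumed to be a
 * pointer to an initialized struct drm_mm:
 *
 *      struct drm_mm_node *entry;
 *
 *      drm_mm_for_each_node(entry, mm) {
 *              printk(KERN_DEBUG "node %#lx-%#lx\n",
 *                     entry->start, entry->start + entry->size);
 *              if (entry->hole_follows)
 *                      printk(KERN_DEBUG "hole %#lx-%#lx\n",
 *                             drm_mm_hole_node_start(entry),
 *                             drm_mm_hole_node_end(entry));
 *      }
 */
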
/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: ulong variable to assign the hole start to on each iteration
 * @hole_end: ulong variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @entry
 * is used internally and will not reflect a real drm_mm_node for the very
 * first hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
        for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
             &entry->hole_stack != &(mm)->hole_stack ? \
             hole_start = drm_mm_hole_node_start(entry), \
             hole_end = drm_mm_hole_node_end(entry), \
             1 : 0; \
             entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
        for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
             &entry->hole_stack != &(mm)->hole_stack ? \
             hole_start = drm_mm_hole_node_start(entry), \
             hole_end = drm_mm_hole_node_end(entry), \
             1 : 0; \
             entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

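/*
 * Example (illustrative sketch, not part of the original header): totaling
 * the free space in an allocator by walking every hole. mm is assumed to be
 * a pointer to an initialized struct drm_mm; note that @entry itself must not
 * be dereferenced, as documented above:
 *
 *      struct drm_mm_node *entry;
 *      unsigned long hole_start, hole_end, total_free = 0;
 *
 *      drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
 *              total_free += hole_end - hole_start;
 */
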
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

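/*
 * Usage sketch (illustrative, not part of the original header): unlike the
 * insert helpers below, drm_mm_reserve_node() takes a node whose start and
 * size the caller has already filled in and tries to reserve exactly that
 * range, which is useful for pre-existing allocations such as a firmware
 * scanout buffer; it fails (typically with -ENOSPC) if the range is not free.
 * fixed_offset and fixed_size are hypothetical names:
 *
 *      node->start = fixed_offset;
 *      node->size = fixed_size;
 *      ret = drm_mm_reserve_node(mm, node);
 */
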
int drm_mm_insert_node_generic(struct drm_mm *mm,
                               struct drm_mm_node *node,
                               unsigned long size,
                               unsigned alignment,
                               unsigned long color,
                               enum drm_mm_search_flags sflags,
                               enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
                                     struct drm_mm_node *node,
                                     unsigned long size,
                                     unsigned alignment,
                                     enum drm_mm_search_flags flags)
{
        return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
                                          DRM_MM_CREATE_DEFAULT);
}

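/*
 * Example (illustrative sketch, not part of the original header): allocating
 * space for an object. kzalloc() satisfies the requirement that the node be
 * cleared to 0, and the node may only be freed after it has been removed from
 * the allocator again. num_pages is a hypothetical size in allocator units:
 *
 *      struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *      if (!node)
 *              return -ENOMEM;
 *      ret = drm_mm_insert_node(mm, node, num_pages, 0,
 *                               DRM_MM_SEARCH_DEFAULT);
 *      if (ret) {
 *              kfree(node);
 *              return ret;
 *      }
 *      ...
 *      drm_mm_remove_node(node);
 *      kfree(node);
 */
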
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
                                        struct drm_mm_node *node,
                                        unsigned long size,
                                        unsigned alignment,
                                        unsigned long color,
                                        unsigned long start,
                                        unsigned long end,
                                        enum drm_mm_search_flags sflags,
                                        enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
                                              struct drm_mm_node *node,
                                              unsigned long size,
                                              unsigned alignment,
                                              unsigned long start,
                                              unsigned long end,
                                              enum drm_mm_search_flags flags)
{
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
                                                   0, start, end, flags,
                                                   DRM_MM_CREATE_DEFAULT);
}

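/*
 * Example (illustrative sketch, not part of the original header): restricting
 * an allocation to a window of the managed range, for instance the part of
 * the aperture that is CPU-mappable. num_pages and mappable_pages are
 * hypothetical names:
 *
 *      ret = drm_mm_insert_node_in_range(mm, node, num_pages, 0,
 *                                        0, mappable_pages,
 *                                        DRM_MM_SEARCH_DEFAULT);
 */
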
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
                 unsigned long start,
                 unsigned long size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);

void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);

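/*
 * Eviction scan sketch (illustrative, not part of the original header; based
 * on how these scan helpers are commonly used by drivers): the scan API helps
 * pick a set of objects to evict in order to open up a hole of the requested
 * size. Candidates are added until drm_mm_scan_add_block() reports a hit;
 * afterwards every added node must be handed back to
 * drm_mm_scan_remove_block() in reverse order of addition, and the nodes for
 * which it returns true should be evicted (their nodes removed with
 * drm_mm_remove_node()) once the scan has been fully unwound. obj, obj->node,
 * obj->evict, scan_link, lru_link and dev_priv->lru are hypothetical driver
 * names; because scan_list is built with list_add(), forward iteration below
 * unwinds in reverse (LIFO) order:
 *
 *      LIST_HEAD(scan_list);
 *
 *      drm_mm_init_scan(mm, num_pages, 0, 0);
 *
 *      list_for_each_entry(obj, &dev_priv->lru, lru_link) {
 *              list_add(&obj->scan_link, &scan_list);
 *              if (drm_mm_scan_add_block(&obj->node))
 *                      break;
 *      }
 *
 *      list_for_each_entry(obj, &scan_list, scan_link)
 *              obj->evict = drm_mm_scan_remove_block(&obj->node);
 */
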
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif

#endif