Rev 4104 → Rev 5060
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway it is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as
 * are some basic allocator dumpers for debugging.
 */

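/*
 * Example usage (an editor's illustrative sketch, not part of the upstream
 * file; "my_gtt" and "my_gtt_setup" are hypothetical names, and the
 * DRM_MM_SEARCH_DEFAULT/DRM_MM_CREATE_DEFAULT flags are assumed from
 * drm_mm.h of this era):
 *
 *	struct my_gtt {
 *		struct drm_mm mm;
 *		struct drm_mm_node node;
 *	};
 *
 *	static int my_gtt_setup(struct my_gtt *gtt)
 *	{
 *		int ret;
 *
 *		memset(&gtt->mm, 0, sizeof(gtt->mm));
 *		drm_mm_init(&gtt->mm, 0, 1024 * 1024);
 *
 *		ret = drm_mm_insert_node_generic(&gtt->mm, &gtt->node,
 *						 4096, 4096, 0,
 *						 DRM_MM_SEARCH_DEFAULT,
 *						 DRM_MM_CREATE_DEFAULT);
 *		if (ret)
 *			return ret;
 *
 *		drm_mm_remove_node(&gtt->node);
 *		drm_mm_takedown(&gtt->mm);
 *		return 0;
 *	}
 *
 * The node is embedded in the driver struct, so no separate allocation is
 * needed; a 4 KiB block is carved out of the managed 1 MiB range with 4 KiB
 * alignment, color 0 and the default bottom-up search.
 */
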
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= tmp;
			else
				adj_start += alignment - tmp;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set up
 * before the range allocator can be set up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	unsigned long end = node->start + node->size;
	unsigned long hole_start;
	unsigned long hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

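/*
 * Sketch of the take-over case described above (editor's illustration with
 * hypothetical names, not part of the upstream file): reserving the exact
 * range the firmware framebuffer occupies, before regular allocations start.
 *
 *	fb_node.start = fb_base;
 *	fb_node.size = fb_size;
 *	fb_node.color = 0;
 *	ret = drm_mm_reserve_node(&vram_mm, &fb_node);
 *	if (ret == -ENOSPC)
 *		return ret;
 *
 * fb_base and fb_size come from whatever the firmware programmed; the call
 * only succeeds while that range is still covered by a single free hole.
 */
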
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= tmp;
			else
				adj_start += alignment - tmp;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment,
					unsigned long color,
					unsigned long start, unsigned long end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

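/*
 * Editor's sketch (hypothetical names, not part of the upstream file): a
 * range-restricted, top-down allocation. DRM_MM_SEARCH_BELOW walks the hole
 * stack in the opposite order, pairing naturally with DRM_MM_CREATE_TOP,
 * which places the node at the high end of the hole that was found — here
 * keeping the low, CPU-mappable part of VRAM free.
 *
 *	ret = drm_mm_insert_node_in_range_generic(&vram_mm, &node,
 *						  8192, 4096, 0,
 *						  mappable_end, vram_size,
 *						  DRM_MM_SEARCH_BELOW,
 *						  DRM_MM_CREATE_TOP);
 */
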
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an un-allocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

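/*
 * Worked example for the alignment fix-up above (editor's note): with
 * start = 0x1003 and alignment = 0x1000, tmp is 3, so start is rounded up
 * by 0x1000 - 3 to 0x2000, and the hole only qualifies if
 * end >= 0x2000 + size.
 */
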
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      unsigned long size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		unsigned long hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		unsigned long hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select all objects from the tail of an LRU until there's
 * a suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So, like the free stack which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */

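/*
 * Editor's sketch of the protocol above (hypothetical driver types and list
 * names, not part of the upstream file). Because list_add() prepends, walking
 * scan_list forwards visits the nodes in exactly the reverse order they were
 * added, as the restore step requires:
 *
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(scan_list);
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_init_scan(&mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		list_del(&obj->scan_link);
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			list_add(&obj->evict_link, &evict_list);
 *	}
 *
 * If found is true, evicting everything on evict_list frees a hole that fits
 * the requested size/alignment/color.
 */
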
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm * mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
				       const char *prefix)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
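
/*
 * Editor's sketch (hypothetical names, not part of the upstream file):
 * exposing the table through debugfs with the standard seq_file single-open
 * boilerplate. single_open() stores its data argument in m->private, and
 * debugfs_create_file() passes its data argument via inode->i_private.
 *
 *	static int my_mm_show(struct seq_file *m, void *unused)
 *	{
 *		return drm_mm_dump_table(m, m->private);
 *	}
 *
 *	static int my_mm_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, my_mm_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations my_mm_fops = {
 *		.open = my_mm_open,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 *
 *	debugfs_create_file("mm", S_IRUGO, root, &my_dev->mm, &my_mm_fops);
 */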