/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is just an
 * unordered stack of free regions. This could easily be improved if an RB-tree
 * is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kzalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kzalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/* drm_mm_pre_get() - pre allocate drm_mm_node structure
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

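/*
 * Illustrative sketch (not part of this file): drm_mm_kmalloc() may be called
 * with atomic == 1, where kzalloc(GFP_ATOMIC) can fail, so callers that
 * allocate while holding a spinlock typically top up mm->unused_nodes with
 * drm_mm_pre_get() first. The names my_mm and my_lock are hypothetical:
 *
 *      if (drm_mm_pre_get(&my_mm))
 *              return -ENOMEM;
 *
 *      spin_lock(&my_lock);
 *      hole = drm_mm_search_free_generic(&my_mm, size, align, 0, 0);
 *      if (hole)
 *              node = drm_mm_get_block_generic(hole, size, align, 0, 1);
 *      spin_unlock(&my_lock);
 */
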
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);

        return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
                                 unsigned long color)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             unsigned long color,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
                               unsigned long color)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
                       unsigned long size, unsigned alignment)
{
        return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);

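/*
 * Illustrative sketch (not part of this file): typical use of the embedded
 * node API above. "struct my_buffer", buf and my_mm are hypothetical; the
 * drm_mm_node must be zeroed (e.g. kzalloc) before drm_mm_insert_node():
 *
 *      struct my_buffer {
 *              struct drm_mm_node vram_node;
 *              // driver-specific fields
 *      };
 *
 *      buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *      ret = drm_mm_insert_node(&my_mm, &buf->vram_node, buf_size, PAGE_SIZE);
 *      if (ret)        // -ENOSPC if no hole is large enough
 *              goto err_free;
 *      // buf->vram_node.start now holds the allocated offset
 *
 *      drm_mm_remove_node(&buf->vram_node);    // on teardown
 */
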
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long color,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (adj_start < start)
                adj_start = start;
        if (adj_end > end)
                adj_end = end;

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                   unsigned long size,
                                                   unsigned alignment,
                                                   unsigned long color,
                                                   unsigned long start,
                                                   unsigned long end,
                                                   int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
                                   start, end);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment, unsigned long color,
                                        unsigned long start, unsigned long end)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long start, unsigned long end)
{
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

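/*
 * Illustrative sketch (not part of this file): range-restricted insertion,
 * e.g. forcing a buffer into the first 256 MiB of a hypothetical aperture
 * managed by my_mm. Offsets are in the same units passed to drm_mm_init():
 *
 *      ret = drm_mm_insert_node_in_range(&my_mm, &buf->vram_node,
 *                                        buf_size, PAGE_SIZE,
 *                                        0, 256 * 1024 * 1024);
 *      if (ret == -ENOSPC) {
 *              // no hole inside [0, 256 MiB) was large enough
 *      }
 */
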
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
               || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(drm_mm_hole_node_start(node)
                       == drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(drm_mm_hole_node_start(node)
                       != drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{

        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                kfree(node);
        spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }

        return end >= start + size;
}

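/*
 * Worked example of the round-up in check_free_hole() above (values are
 * hypothetical): with start == 0x1050 and alignment == 0x1000, tmp == 0x50,
 * so start becomes 0x1050 + (0x1000 - 0x50) == 0x2000, the next aligned
 * offset. The hole only qualifies if end >= 0x2000 + size after that
 * adjustment.
 */
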
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                               unsigned long size,
                                               unsigned alignment,
                                               unsigned long color,
                                               bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
                                                        bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

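/*
 * Illustrative sketch (not part of this file): drm_mm_replace_node() hands an
 * allocated range over to another embedded node in place, without freeing the
 * hole and re-allocating it. The names tmp_node and obj->node are
 * hypothetical:
 *
 *      // tmp_node currently owns the range; transfer it to obj->node
 *      drm_mm_replace_node(&tmp_node, &obj->node);
 *      // tmp_node.allocated is now 0 and the struct may be reused
 */
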
/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start, adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
        adj_end = hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color,
                                 &adj_start, &adj_end);

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_end = hole_end;
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the free_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        list_add(&node->node_list, &prev_node->node_list);

        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
                node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

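/*
 * Illustrative sketch (not part of this file) of the scan protocol described
 * above, roughly how a driver finds buffers to evict to make room. The names
 * my_mm, lru, obj, obj->node and obj->lru_link are hypothetical:
 *
 *      drm_mm_init_scan(&my_mm, size, alignment, 0);
 *
 *      found = 0;
 *      list_for_each_entry(obj, &lru, lru_link) {
 *              if (drm_mm_scan_add_block(&obj->node)) {
 *                      found = 1;
 *                      break;
 *              }
 *      }
 *
 *      // Every block added above must also be passed to
 *      // drm_mm_scan_remove_block() before the scan state is dropped,
 *      // following the ordering rule documented above. Blocks for which it
 *      // returns 1 overlap the found hole and must be evicted
 *      // (drm_mm_remove_node()) before retrying the allocation.
 */
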
int drm_mm_clean(struct drm_mm * mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_lock_init(&mm->unused_lock);

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->color_adjust = NULL;

        return 0;
}
EXPORT_SYMBOL(drm_mm_init);

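/*
 * Illustrative sketch (not part of this file): managing a hypothetical 64 MiB
 * region starting at offset 0 with the API above.
 *
 *      struct drm_mm my_mm;
 *
 *      drm_mm_init(&my_mm, 0, 64 * 1024 * 1024);
 *
 *      // allocate and free ranges with drm_mm_insert_node() /
 *      // drm_mm_remove_node() while the manager is live
 *
 *      drm_mm_takedown(&my_mm);        // all nodes must already be removed
 */
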
void drm_mm_takedown(struct drm_mm * mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                       prefix, hole_start, hole_end,
                       hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                       prefix, entry->start, entry->start + entry->size,
                       entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                               prefix, hole_start, hole_end,
                               hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
               total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                           hole_start, hole_end, hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                           entry->start, entry->start + entry->size,
                           entry->size);
                total_used += entry->size;
                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                   hole_start, hole_end, hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif