/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
28 | |||
29 | /* |
||
30 | * Generic simple memory manager implementation. Intended to be used as a base |
||
31 | * class implementation for more advanced memory managers. |
||
32 | * |
||
33 | * Note that the algorithm used is quite simple and there might be substantial |
||
34 | * performance gains if a smarter free list is implemented. Currently it is just an |
||
35 | * unordered stack of free regions. This could easily be improved if an RB-tree |
||
36 | * is used instead. At least if we expect heavy fragmentation. |
||
37 | * |
||
38 | * Aligned allocations can also see improvement. |
||
39 | * |
||
40 | * Authors: |
||
41 | * Thomas Hellström |
||
42 | */ |
||
43 | |||
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structure
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
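
/*
 * Sketch of the pre-allocation pattern drm_mm_pre_get() supports.
 * Illustration only, compiled out; example_atomic_alloc and the caller's
 * spinlock are hypothetical. A caller that must allocate while holding a
 * spinlock tops up the unused_nodes cache in a sleepable context first, so
 * that the later atomic allocation can fall back to that cache instead of
 * failing when GFP_ATOMIC memory is tight.
 */
#if 0
static struct drm_mm_node *example_atomic_alloc(struct drm_mm *mm,
						spinlock_t *lock,
						unsigned long size)
{
	struct drm_mm_node *hole, *node = NULL;

	if (drm_mm_pre_get(mm))			/* may sleep, fills the cache */
		return NULL;

	spin_lock(lock);			/* atomic from here on */
	hole = drm_mm_search_free(mm, size, 0, false);
	if (hole)
		node = drm_mm_get_block_generic(hole, size, 0, 0, 1 /* atomic */);
	spin_unlock(lock);

	return node;
}
#endif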

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}
163 | |||
1963 | serge | 164 | struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, |
1123 | serge | 165 | unsigned long size, |
1963 | serge | 166 | unsigned alignment, |
3031 | serge | 167 | unsigned long color, |
1123 | serge | 168 | int atomic) |
169 | { |
||
1963 | serge | 170 | struct drm_mm_node *node; |
1123 | serge | 171 | |
1963 | serge | 172 | node = drm_mm_kmalloc(hole_node->mm, atomic); |
173 | if (unlikely(node == NULL)) |
||
1123 | serge | 174 | return NULL; |
175 | |||
3031 | serge | 176 | drm_mm_insert_helper(hole_node, node, size, alignment, color); |
1123 | serge | 177 | |
1963 | serge | 178 | return node; |
179 | } |
||
180 | EXPORT_SYMBOL(drm_mm_get_block_generic); |
||
1123 | serge | 181 | |
/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, false);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, 0);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
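
/*
 * Sketch of the embedded-node style drm_mm_insert_node() is meant for.
 * Illustration only, compiled out; struct example_buffer and the example_*
 * helpers are hypothetical driver code. The node lives inside the driver's
 * own object, so no separate drm_mm_node allocation (and no
 * drm_mm_pre_get()) is needed; the node only has to be zeroed before it is
 * inserted.
 */
#if 0
struct example_buffer {
	struct drm_mm_node node;	/* must be cleared before insert */
	void *priv;
};

static int example_buffer_bind(struct drm_mm *mm, struct example_buffer *buf,
			       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node(mm, &buf->node, size, alignment);
}

static void example_buffer_unbind(struct example_buffer *buf)
{
	drm_mm_remove_node(&buf->node);
}
#endif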

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (adj_start < start)
		adj_start = start;

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long color,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, false);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
				   start, end);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
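
/*
 * Range-restricted variant in the same embedded-node style. Illustration
 * only, compiled out; the function name and the 256 MiB boundary are
 * arbitrary example values. Only holes intersecting [start, end) are
 * considered, which is useful when a buffer must stay inside a limited
 * window such as a mappable aperture.
 */
#if 0
static int example_bind_mappable(struct drm_mm *mm, struct drm_mm_node *node,
				 unsigned long size)
{
	return drm_mm_insert_node_in_range(mm, node, size, PAGE_SIZE,
					   0, 256 * 1024 * 1024);
}
#endif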

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
		       == drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
		       != drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

328 | |||
329 | /* |
||
330 | * Remove a memory node from the allocator and free the allocated struct |
||
331 | * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the |
||
332 | * drm_mm_get_block functions. |
||
333 | */ |
||
334 | void drm_mm_put_block(struct drm_mm_node *node) |
||
335 | { |
||
336 | |||
337 | struct drm_mm *mm = node->mm; |
||
338 | |||
339 | drm_mm_remove_node(node); |
||
340 | |||
1321 | serge | 341 | spin_lock(&mm->unused_lock); |
1123 | serge | 342 | if (mm->num_unused < MM_UNUSED_TARGET) { |
1963 | serge | 343 | list_add(&node->node_list, &mm->unused_nodes); |
1123 | serge | 344 | ++mm->num_unused; |
345 | } else |
||
1963 | serge | 346 | kfree(node); |
1321 | serge | 347 | spin_unlock(&mm->unused_lock); |
1963 | serge | 348 | } |
349 | EXPORT_SYMBOL(drm_mm_put_block); |
||
350 | |||
351 | static int check_free_hole(unsigned long start, unsigned long end, |
||
352 | unsigned long size, unsigned alignment) |
||
353 | { |
||
354 | if (end - start < size) |
||
355 | return 0; |
||
356 | |||
357 | if (alignment) { |
||
358 | unsigned tmp = start % alignment; |
||
359 | if (tmp) |
||
3031 | serge | 360 | start += alignment - tmp; |
1123 | serge | 361 | } |
1963 | serge | 362 | |
3031 | serge | 363 | return end >= start + size; |
1123 | serge | 364 | } |
365 | |||
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							 unsigned long size,
							 unsigned alignment,
							 unsigned long color,
							 unsigned long start,
							 unsigned long end,
							 bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);

	adj_start = hole_start;
	adj_end = hole_end;

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start & size of the
	 * complete resulting free block (not just the desired part) are
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
		    <= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
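
/*
 * Sketch of the eviction scan these helpers implement. Illustration only,
 * compiled out; struct example_object, its list links, and example_evict()
 * are hypothetical stand-ins for driver code. Nodes are speculatively added
 * to the scan until a hole of the requested size appears, then all of them
 * are removed again in the exact reverse order; drm_mm_scan_remove_block()
 * reports which nodes actually have to be evicted to produce that hole.
 */
#if 0
struct example_object {
	struct drm_mm_node node;
	struct list_head lru_link;	/* on the driver's LRU */
	struct list_head scan_link;	/* temporary, for the scan */
};

static int example_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
				  unsigned long size, unsigned alignment)
{
	struct example_object *obj, *tmp;
	LIST_HEAD(unwind);		/* head-inserted: newest first */
	LIST_HEAD(evict);
	int found = 0;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* Speculatively add LRU objects until a large enough hole appears. */
	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &unwind);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = 1;
			break;
		}
	}

	/*
	 * Remove everything from the scan again, newest first (the exact
	 * reverse order of addition), and note which nodes overlap the hole.
	 */
	while (!list_empty(&unwind)) {
		obj = list_first_entry(&unwind, struct example_object,
				       scan_link);
		if (drm_mm_scan_remove_block(&obj->node) && found)
			list_move(&obj->scan_link, &evict);
		else
			list_del_init(&obj->scan_link);
	}

	if (!found)
		return -ENOSPC;

	/* The scan list is empty now, so the chosen objects can be evicted. */
	list_for_each_entry_safe(obj, tmp, &evict, scan_link)
		example_evict(obj);	/* hypothetical: removes obj->node */

	return 0;
}
#endif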

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
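
/*
 * Worked example of the head_node trick above, with illustrative numbers:
 * after drm_mm_init(mm, 0x1000, 0x3000) the head node has
 * start = 0x1000 + 0x3000 = 0x4000 and size = 0x1000 - 0x4000 (which wraps),
 * so head_node.start + head_node.size == 0x1000. drm_mm_hole_node_start()
 * on the head node therefore returns 0x1000, and because the node list is
 * still circular with only the head node on it, drm_mm_hole_node_end()
 * returns head_node.start == 0x4000. The managed range [0x1000, 0x4000)
 * thus shows up as one ordinary hole following the head node, and no other
 * code needs a special case for the empty manager.
 */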

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);