Rev 1246 | Rev 1404 | Go to most recent revision | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1246 | Rev 1321 | ||
---|---|---|---|
Line 98... | Line 98... | ||
98 | spin_unlock(&mm->unused_lock); |
98 | spin_unlock(&mm->unused_lock); |
99 | } |
99 | } |
100 | return child; |
100 | return child; |
101 | } |
101 | } |
Line -... | Line 102... | ||
- | 102 | ||
- | 103 | /* drm_mm_pre_get() - pre-allocate a drm_mm_node structure |
|
- | 104 | * drm_mm: memory manager struct we are pre-allocating for |
|
- | 105 | * |
|
- | 106 | * Returns 0 on success or -ENOMEM if allocation fails. |
|
102 | 107 | */ |
|
103 | int drm_mm_pre_get(struct drm_mm *mm) |
108 | int drm_mm_pre_get(struct drm_mm *mm) |
104 | { |
109 | { |
Line 105... | Line 110... | ||
105 | struct drm_mm_node *node; |
110 | struct drm_mm_node *node; |
Line 216... | Line 221... | ||
216 | 221 | ||
217 | return node; |
222 | return node; |
218 | } |
223 | } |
Line -... | Line 224... | ||
- | 224 | EXPORT_SYMBOL(drm_mm_get_block_generic); |
|
- | 225 | ||
- | 226 | struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node, |
|
- | 227 | unsigned long size, |
|
- | 228 | unsigned alignment, |
|
- | 229 | unsigned long start, |
|
- | 230 | unsigned long end, |
|
- | 231 | int atomic) |
|
- | 232 | { |
|
- | 233 | struct drm_mm_node *align_splitoff = NULL; |
|
- | 234 | unsigned tmp = 0; |
|
- | 235 | unsigned wasted = 0; |
|
- | 236 | ||
- | 237 | if (node->start < start) |
|
- | 238 | wasted += start - node->start; |
|
- | 239 | if (alignment) |
|
- | 240 | tmp = ((node->start + wasted) % alignment); |
|
- | 241 | ||
- | 242 | if (tmp) |
|
- | 243 | wasted += alignment - tmp; |
|
- | 244 | if (wasted) { |
|
- | 245 | align_splitoff = drm_mm_split_at_start(node, wasted, atomic); |
|
- | 246 | if (unlikely(align_splitoff == NULL)) |
|
- | 247 | return NULL; |
|
- | 248 | } |
|
- | 249 | ||
- | 250 | if (node->size == size) { |
|
- | 251 | list_del_init(&node->fl_entry); |
|
- | 252 | node->free = 0; |
|
- | 253 | } else { |
|
- | 254 | node = drm_mm_split_at_start(node, size, atomic); |
|
- | 255 | } |
|
- | 256 | ||
- | 257 | if (align_splitoff) |
|
- | 258 | drm_mm_put_block(align_splitoff); |
|
- | 259 | ||
- | 260 | return node; |
|
- | 261 | } |
|
219 | EXPORT_SYMBOL(drm_mm_get_block_generic); |
262 | EXPORT_SYMBOL(drm_mm_get_block_range_generic); |
220 | 263 | ||
221 | /* |
264 | /* |
222 | * Put a block. Merge with the previous and / or next block if they are free. |
265 | * Put a block. Merge with the previous and / or next block if they are free. |
Line 248... | Line 291... | ||
248 | if (next_node->free) { |
291 | if (next_node->free) { |
249 | if (merged) { |
292 | if (merged) { |
250 | prev_node->size += next_node->size; |
293 | prev_node->size += next_node->size; |
251 | list_del(&next_node->ml_entry); |
294 | list_del(&next_node->ml_entry); |
252 | list_del(&next_node->fl_entry); |
295 | list_del(&next_node->fl_entry); |
- | 296 | spin_lock(&mm->unused_lock); |
|
253 | if (mm->num_unused < MM_UNUSED_TARGET) { |
297 | if (mm->num_unused < MM_UNUSED_TARGET) { |
254 | list_add(&next_node->fl_entry, |
298 | list_add(&next_node->fl_entry, |
255 | &mm->unused_nodes); |
299 | &mm->unused_nodes); |
256 | ++mm->num_unused; |
300 | ++mm->num_unused; |
257 | } else |
301 | } else |
258 | kfree(next_node); |
302 | kfree(next_node); |
- | 303 | spin_unlock(&mm->unused_lock); |
|
259 | } else { |
304 | } else { |
260 | next_node->size += cur->size; |
305 | next_node->size += cur->size; |
261 | next_node->start = cur->start; |
306 | next_node->start = cur->start; |
262 | merged = 1; |
307 | merged = 1; |
263 | } |
308 | } |
Line 266... | Line 311... | ||
266 | if (!merged) { |
311 | if (!merged) { |
267 | cur->free = 1; |
312 | cur->free = 1; |
268 | list_add(&cur->fl_entry, &mm->fl_entry); |
313 | list_add(&cur->fl_entry, &mm->fl_entry); |
269 | } else { |
314 | } else { |
270 | list_del(&cur->ml_entry); |
315 | list_del(&cur->ml_entry); |
- | 316 | spin_lock(&mm->unused_lock); |
|
271 | if (mm->num_unused < MM_UNUSED_TARGET) { |
317 | if (mm->num_unused < MM_UNUSED_TARGET) { |
272 | list_add(&cur->fl_entry, &mm->unused_nodes); |
318 | list_add(&cur->fl_entry, &mm->unused_nodes); |
273 | ++mm->num_unused; |
319 | ++mm->num_unused; |
274 | } else |
320 | } else |
275 | kfree(cur); |
321 | kfree(cur); |
- | 322 | spin_unlock(&mm->unused_lock); |
|
276 | } |
323 | } |
277 | } |
324 | } |
Line 278... | Line 325... | ||
278 | 325 | ||
Line 317... | Line 364... | ||
317 | 364 | ||
318 | return best; |
365 | return best; |
319 | } |
366 | } |
Line -... | Line 367... | ||
- | 367 | EXPORT_SYMBOL(drm_mm_search_free); |
|
- | 368 | ||
- | 369 | struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, |
|
- | 370 | unsigned long size, |
|
- | 371 | unsigned alignment, |
|
- | 372 | unsigned long start, |
|
- | 373 | unsigned long end, |
|
- | 374 | int best_match) |
|
- | 375 | { |
|
- | 376 | struct list_head *list; |
|
- | 377 | const struct list_head *free_stack = &mm->fl_entry; |
|
- | 378 | struct drm_mm_node *entry; |
|
- | 379 | struct drm_mm_node *best; |
|
- | 380 | unsigned long best_size; |
|
- | 381 | unsigned wasted; |
|
- | 382 | ||
- | 383 | best = NULL; |
|
- | 384 | best_size = ~0UL; |
|
- | 385 | ||
- | 386 | list_for_each(list, free_stack) { |
|
- | 387 | entry = list_entry(list, struct drm_mm_node, fl_entry); |
|
- | 388 | wasted = 0; |
|
- | 389 | ||
- | 390 | if (entry->size < size) |
|
- | 391 | continue; |
|
- | 392 | ||
- | 393 | if (entry->start > end || (entry->start+entry->size) < start) |
|
- | 394 | continue; |
|
- | 395 | ||
- | 396 | if (entry->start < start) |
|
- | 397 | wasted += start - entry->start; |
|
- | 398 | ||
- | 399 | if (alignment) { |
|
- | 400 | register unsigned tmp = (entry->start + wasted) % alignment; |
|
- | 401 | if (tmp) |
|
- | 402 | wasted += alignment - tmp; |
|
- | 403 | } |
|
- | 404 | ||
- | 405 | if (entry->size >= size + wasted) { |
|
- | 406 | if (!best_match) |
|
- | 407 | return entry; |
|
- | 408 | if (size < best_size) { |
|
- | 409 | best = entry; |
|
- | 410 | best_size = entry->size; |
|
- | 411 | } |
|
- | 412 | } |
|
- | 413 | } |
|
- | 414 | ||
- | 415 | return best; |
|
- | 416 | } |
|
320 | EXPORT_SYMBOL(drm_mm_search_free); |
417 | EXPORT_SYMBOL(drm_mm_search_free_in_range); |
321 | 418 | ||
322 | int drm_mm_clean(struct drm_mm * mm) |
419 | int drm_mm_clean(struct drm_mm * mm) |
Line 323... | Line 420... | ||
323 | { |
420 | { |