/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström
 */

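/*
 * A minimal lifecycle sketch (illustrative only; the 1 MiB range, the 4 KiB
 * request and the page alignment are made-up values, and error handling is
 * trimmed):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	hole = drm_mm_search_free(&mm, 4096, PAGE_SIZE, 0);
 *	if (hole)
 *		node = drm_mm_get_block_generic(hole, 4096, PAGE_SIZE, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */
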
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>

#define MM_UNUSED_TARGET 4

unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	/* Honor the atomic flag: GFP_ATOMIC must not sleep, GFP_KERNEL may. */
	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		/* Fall back to a node cached by drm_mm_pre_get(). */
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre allocate drm_mm_node structure
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
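
/*
 * Typical pre-allocation pattern (a sketch; dev_priv->mm_lock stands in for
 * a hypothetical caller-owned lock): refill the cache of unused nodes while
 * sleeping is still allowed, then perform the allocation itself with
 * atomic == 1 so drm_mm_kmalloc() never sleeps under the lock.
 *
 *	drm_mm_pre_get(mm);
 *	spin_lock(&dev_priv->mm_lock);
 *	node = drm_mm_get_block_generic(hole, size, alignment, 1);
 *	spin_unlock(&dev_priv->mm_lock);
 */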

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Best fit: keep the smallest hole that still fits.
			 * Comparing the request size here (as the original
			 * did) let every later candidate win regardless of
			 * size; compare the candidate's size instead. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (entry->start > end || (entry->start + entry->size) < start)
			continue;

		if (entry->start < start)
			wasted += start - entry->start;

		if (alignment) {
			register unsigned tmp = (entry->start + wasted) % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Best fit: compare the candidate's size, not the
			 * request size (same fix as drm_mm_search_free()). */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
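
/*
 * Sketch of a range-restricted allocation (hypothetical start/end values):
 * find a free hole that can satisfy the request inside [start, end), then
 * carve the block out of it with the same range arguments.
 *
 *	hole = drm_mm_search_free_in_range(mm, size, alignment,
 *					   start, end, 1);
 *	if (hole)
 *		node = drm_mm_get_block_range_generic(hole, size, alignment,
 *						      start, end, 0);
 */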

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	/* Clean iff the only node left is the initial tail node. */
	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
467 | EXPORT_SYMBOL(drm_mm_takedown);>>>>>>>>>>>=> |