/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a
 * base class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be
 * substantial performance gains if a smarter free list is implemented.
 * Currently it is just an unordered stack of free regions. This could
 * easily be improved if an RB-tree were used instead, at least if we
 * expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström
 */
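
/*
 * Typical usage, as a minimal sketch (the managed range and sizes are
 * illustrative only): initialize the manager, search for a hole, claim
 * it, and eventually release it again.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	node = drm_mm_search_free(&mm, 4096, 4096, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, 4096, 4096, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */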

#include "drmP.h"
#include "drm_mm.h"
//#include <linux/slab.h>
#include <errno.h>

#define MM_UNUSED_TARGET 4

unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	/* Note: the atomic argument is not used by this allocation path. */
	child = kzalloc(sizeof(*child), 0);

	if (unlikely(child == NULL)) {
		/* Fall back to the nodes cached by drm_mm_pre_get(). */
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
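
/*
 * A minimal sketch of the intended two-stage pattern (the caller-side
 * lock is hypothetical): drm_mm_pre_get() refills the unused_nodes
 * cache while sleeping is still allowed, so that a later allocation
 * made under a spinlock can pass atomic = 1 and let drm_mm_kmalloc()
 * fall back to the cached nodes:
 *
 *	if (drm_mm_pre_get(mm))
 *		return -ENOMEM;
 *	spin_lock(&caller_lock);
 *	node = drm_mm_get_block_generic(node, size, alignment, 1);
 *	spin_unlock(&caller_lock);
 */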

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}
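
/*
 * Sketch of how the tail helpers combine (the amount is illustrative):
 * a driver can probe the free space left at the end of the managed
 * range and grow it when more address space becomes available.
 *
 *	if (drm_mm_tail_space(&mm) < needed)
 *		ret = drm_mm_add_space_to_tail(&mm, 256 * 1024, 0);
 */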

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
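
/*
 * Alignment worked example (values are illustrative): for a free node
 * at start 0x3000 with alignment 0x4000, tmp = 0x3000, so a block of
 * alignment - tmp = 0x1000 bytes is split off first. The allocation
 * then starts at the aligned address 0x4000, and the split-off
 * fragment is immediately returned to the free list via
 * drm_mm_put_block().
 */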

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
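
/*
 * Merge example: freeing a block whose neighbours on both sides are
 * free first grows the previous node by cur->size, then absorbs the
 * following node as well, collapsing three regions into one. The two
 * surplus drm_mm_node structures are recycled onto unused_nodes while
 * num_unused < MM_UNUSED_TARGET, otherwise kfree()d.
 */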

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* best fit: keep the smallest hole that still fits */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (entry->start > end || (entry->start+entry->size) < start)
			continue;

		if (entry->start < start)
			wasted += start - entry->start;

		if (alignment) {
			register unsigned tmp = (entry->start + wasted) % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* best fit: keep the smallest hole that still fits */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
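
/*
 * Range search worked example (values are illustrative): with a free
 * node spanning [0x0, 0x10000) and a requested range starting at
 * 0x8000, wasted starts at 0x8000; any alignment remainder is then
 * added on top, and the node only qualifies if entry->size still
 * covers size + wasted.
 */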

/*
 * Returns nonzero when the manager holds no allocations, i.e. the only
 * remaining node is the single free block created by drm_mm_init().
 */
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);