/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a
 * base-class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple: the free list is currently
 * just an unordered stack of free regions, so there might be substantial
 * performance gains if a smarter structure (for instance an RB-tree) were
 * used instead, at least under heavy fragmentation. Aligned allocations
 * could also see improvement.
 *
 * Authors:
 * Thomas Hellström
 */
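
/*
 * Typical usage (an illustrative sketch, not part of the original file;
 * the range, size and alignment values below are made up):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);	// manage a 1 MiB range
 *
 *	node = drm_mm_search_free(&mm, 4096, 4096, 0);	// first fit
 *	if (node)
 *		node = drm_mm_get_block_generic(node, 4096, 4096, 0);
 *
 *	// node->start is now the offset of the 4 KiB allocation
 *
 *	drm_mm_put_block(node);	// free; merges with free neighbours
 *	drm_mm_takedown(&mm);
 */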

#include "drmP.h"
#include "drm_mm.h"
//#include

#define MM_UNUSED_TARGET 4
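
/*
 * Return the size of the free block at the tail of the managed range,
 * or 0 if the tail block is currently allocated.
 */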
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}
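
/*
 * Shrink the managed range by removing 'size' bytes from its tail.
 * Fails if the tail block is allocated or not strictly larger than 'size'.
 */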
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}
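
/*
 * Allocate a node structure, falling back to the cache of pre-allocated
 * unused nodes when the allocator fails. Note that in this KolibriOS port
 * the 'atomic' argument is unused: allocation always goes through malloc().
 */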
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	child = malloc(sizeof(*child));

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
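
/*
 * Pre-fill the unused-node cache up to MM_UNUSED_TARGET entries so that
 * drm_mm_kmalloc() can fall back to it when allocation fails. The spinlock
 * is dropped around the allocation, since allocating may sleep.
 */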
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
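
/*
 * Append a new free node covering [start, start + size) to the tail of
 * the memory list and to the free stack.
 */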
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}
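
/*
 * Grow the managed range by 'size' bytes: extend the tail block if it is
 * free, otherwise append a new free tail node.
 */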
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}
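
/*
 * Split 'size' bytes off the start of the free node 'parent' and return
 * them as a new, allocated node; 'parent' shrinks accordingly.
 */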
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}
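
/*
 * Allocate 'size' bytes from the free node 'node'. If 'alignment' requires
 * it, an alignment fragment is split off first and returned to the free
 * stack. Returns the allocated node, or NULL if node allocation fails.
 */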
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add it to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
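
/*
 * Walk the free stack looking for a block that can hold 'size' bytes at
 * the requested alignment. With 'best_match' set, the smallest fitting
 * block is returned; otherwise the first fit is returned.
 */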
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
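
/*
 * Return nonzero if the manager consists of a single (free) node, i.e.
 * nothing is currently allocated.
 */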
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
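
/*
 * Initialize 'mm' to manage the range [start, start + size) as one free
 * block.
 */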
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
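
/*
 * Tear down the memory manager. Refuses and logs an error if blocks are
 * still allocated; otherwise frees the remaining node and drains the
 * unused-node cache.
 */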
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);