/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
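
/*
 * Minimal usage sketch (illustrative only; the sizes and the 4096-byte
 * alignment are arbitrary example values, and error handling is omitted).
 * A driver initializes a manager over a range of its address space, finds
 * a free block, carves an allocation out of it, and eventually returns it:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 16 * 1024 * 1024);
 *	node = drm_mm_search_free(&mm, 4096, 4096, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, 4096, 4096, 0);
 *	// node->start is the offset of the allocation within the range
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */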

#include "drmP.h"
#include "drm_mm.h"
//#include <linux/slab.h>
#include <errno.h>
1123 serge 48
 
49
#define MM_UNUSED_TARGET 4
50
 
51
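/*
 * Return the size of the free block at the tail of the managed range,
 * or 0 if the tail block is allocated.
 */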
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

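/*
 * Shrink the managed range by removing 'size' bytes from the tail block.
 * Fails with -ENOMEM if the tail block is allocated or is not strictly
 * larger than the requested amount.
 */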
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

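/*
 * Allocate a node, falling back to the pool of pre-allocated unused nodes
 * if the allocation fails. In this port the allocation always goes through
 * malloc(), so the 'atomic' argument is effectively unused.
 */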
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	child = malloc(sizeof(*child));

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

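/*
 * Top up the pool of pre-allocated nodes until it holds MM_UNUSED_TARGET
 * entries, so that later atomic requests can be served from the pool.
 * The lock is dropped around each allocation so the allocator is never
 * entered with the spinlock held.
 */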
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

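/*
 * Append a new free block of 'size' bytes starting at 'start' to both the
 * memory list and the free stack.
 */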
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

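/*
 * Grow the managed range by 'size' bytes: extend the tail block if it is
 * free, otherwise append a new free block after it.
 */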
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

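/*
 * Split 'size' bytes off the start of a free block and return them as a
 * new, allocated node. The parent keeps the remainder.
 */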
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

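/*
 * Allocate 'size' bytes from a free block found by drm_mm_search_free().
 * If the block is misaligned, the bytes up to the next alignment boundary
 * are split off first and returned to the free stack afterwards. A block
 * that matches the size exactly is converted in place; otherwise the
 * allocation is split off its start.
 */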
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

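/*
 * Scan the free stack for a block that can hold 'size' bytes at the given
 * alignment. Returns the first fit, or, if best_match is set, the smallest
 * block that still fits.
 */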
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
1123 serge 321
 
322
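/*
 * Return nonzero if the manager holds only its initial block, i.e. no
 * allocations are outstanding.
 */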
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
1123 serge 329
 
330
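/*
 * Initialize a manager over the range [start, start + size) with a single
 * free block covering all of it.
 */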
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
1123 serge 341
 
342
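/*
 * Tear down a manager. Refuses (with an error message) if anything is
 * still allocated; otherwise frees the remaining block and the pool of
 * pre-allocated nodes.
 */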
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);