Subversion Repositories Kolibri OS

drm_mm.c (DRM memory-range manager): diff of Rev 1430 against Rev 1963.
Notation below: lines prefixed "-" exist only in Rev 1430, lines prefixed "+" only in Rev 1963, and unprefixed lines are common to both revisions; the "Line M... Line N..." markers are the viewer's own per-revision line numbers. The e-mail address after "Thomas Hellström" and the angle-bracket targets of the bare #include lines were lost when the page was captured and are left as-is.

Line 41... Line 41...
  * Thomas Hellström
  */

 #include "drmP.h"
 #include "drm_mm.h"
-//#include
+#include
 #include
Line 48... Line -...
 
 #define MM_UNUSED_TARGET 4
 
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
-{
-	struct list_head *tail_node;
-	struct drm_mm_node *entry;
-
-	tail_node = mm->ml_entry.prev;
-	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
-	if (!entry->free)
-		return 0;
-
-	return entry->size;
-}
-
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
-{
-	struct list_head *tail_node;
-	struct drm_mm_node *entry;
-
-	tail_node = mm->ml_entry.prev;
-	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
-	if (!entry->free)
-		return -ENOMEM;
-
-	if (entry->size <= size)
-		return -ENOMEM;
-
-	entry->size -= size;
-	return 0;
-}
-
Line -... Line 51...
 static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 {
 	struct drm_mm_node *child;
 
-	child = kzalloc(sizeof(*child), 0);
+	if (atomic)
+		child = kzalloc(sizeof(*child), GFP_ATOMIC);
+	else
+		child = kzalloc(sizeof(*child), GFP_KERNEL);
 
 	if (unlikely(child == NULL)) {
 		spin_lock(&mm->unused_lock);
 		if (list_empty(&mm->unused_nodes))
 			child = NULL;
 		else {
 			child =
 			    list_entry(mm->unused_nodes.next,
-				       struct drm_mm_node, fl_entry);
-			list_del(&child->fl_entry);
+				       struct drm_mm_node, node_list);
+			list_del(&child->node_list);
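
The rework makes the "atomic" flag meaningful: GFP_KERNEL allocations may sleep, GFP_ATOMIC ones may not, and the Rev 1430 KolibriOS port had simply passed a bare 0. A hedged sketch of what the flag buys a caller, reached through the drm_mm_get_block()/drm_mm_get_block_atomic() wrappers that upstream keeps in drm_mm.h (assumed here, not shown in this diff):

	/* Process context: the node allocation may sleep (GFP_KERNEL). */
	node = drm_mm_get_block(hole, size, alignment);

	/* With a spinlock held: must not sleep (GFP_ATOMIC), and may be
	 * served from the mm->unused_nodes cache seen above. */
	node = drm_mm_get_block_atomic(hole, size, alignment);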
Line 110... Line 83...
 	struct drm_mm_node *node;
 
 	spin_lock(&mm->unused_lock);
 	while (mm->num_unused < MM_UNUSED_TARGET) {
 		spin_unlock(&mm->unused_lock);
-		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
 		spin_lock(&mm->unused_lock);
 
 		if (unlikely(node == NULL)) {
 			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
 			spin_unlock(&mm->unused_lock);
 			return ret;
 		}
 		++mm->num_unused;
-		list_add_tail(&node->fl_entry, &mm->unused_nodes);
+		list_add_tail(&node->node_list, &mm->unused_nodes);
 	}
 	spin_unlock(&mm->unused_lock);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_pre_get);
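
drm_mm_pre_get() exists to make the atomic path reliable: it tops mm->unused_nodes up to MM_UNUSED_TARGET entries while sleeping is still allowed, so a later allocation made under a lock can draw from that cache instead of failing. A hedged usage sketch (the lock and sizes are illustrative, not from this file):

	int prefill_then_alloc(struct drm_mm *mm, spinlock_t *lock,
			       unsigned long size, unsigned align)
	{
		struct drm_mm_node *hole, *node = NULL;
		int ret;

		/* Refill the per-mm node cache while we may still sleep. */
		ret = drm_mm_pre_get(mm);
		if (ret)
			return ret;

		spin_lock(lock);
		hole = drm_mm_search_free(mm, size, align, 0);
		if (hole)	/* atomic = 1: never sleeps, uses the cache */
			node = drm_mm_get_block_generic(hole, size, align, 1);
		spin_unlock(lock);

		return node ? 0 : -ENOSPC;
	}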
 
-static int drm_mm_create_tail_node(struct drm_mm *mm,
-				   unsigned long start,
-				   unsigned long size, int atomic)
-{
-	struct drm_mm_node *child;
-
-	child = drm_mm_kmalloc(mm, atomic);
-	if (unlikely(child == NULL))
-		return -ENOMEM;
-
-	child->free = 1;
-	child->size = size;
-	child->start = start;
-	child->mm = mm;
-
-	list_add_tail(&child->ml_entry, &mm->ml_entry);
-	list_add_tail(&child->fl_entry, &mm->fl_entry);
-
-	return 0;
-}
-
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
-{
-	struct list_head *tail_node;
-	struct drm_mm_node *entry;
-
-	tail_node = mm->ml_entry.prev;
-	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
-	if (!entry->free) {
-		return drm_mm_create_tail_node(mm, entry->start + entry->size,
-					       size, atomic);
-	}
-	entry->size += size;
-	return 0;
-}
-
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+{
+	return hole_node->start + hole_node->size;
+}
+
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+	struct drm_mm_node *next_node =
+		list_entry(hole_node->node_list.next, struct drm_mm_node,
+			   node_list);
+
+	return next_node->start;
+}
+
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+				 struct drm_mm_node *node,
+				 unsigned long size, unsigned alignment)
+{
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
+	if (alignment)
+		tmp = hole_start % alignment;
+
+	if (!tmp) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
+	} else
+		wasted = alignment - tmp;
+
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
+	node->allocated = 1;
+
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > hole_end);
+
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	} else {
+		node->hole_follows = 0;
+	}
+}
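
This is the heart of the Rev 1963 rework: there is no explicit free list any more. Free space is represented implicitly as the gap behind an allocated node; a node with hole_follows set owns the hole [start + size, next->start), and all such nodes are chained on mm->hole_stack. A small consistency-check sketch of that invariant (a debug helper I am assuming, not part of the file; it could live in this file since the hole helpers are static):

	/* Sketch: verify the bookkeeping drm_mm_insert_helper() maintains.
	 * Every node on hole_stack must have hole_follows set and a
	 * non-empty gap before the next node. */
	static void drm_mm_debug_check_holes(struct drm_mm *mm)
	{
		struct drm_mm_node *entry;

		list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
			BUG_ON(!entry->hole_follows);
			BUG_ON(drm_mm_hole_node_start(entry) >=
			       drm_mm_hole_node_end(entry));
		}
	}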
 
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-						 unsigned long size,
-						 int atomic)
-{
-	struct drm_mm_node *child;
-
-	child = drm_mm_kmalloc(parent->mm, atomic);
-	if (unlikely(child == NULL))
-		return NULL;
-
-	INIT_LIST_HEAD(&child->fl_entry);
-
-	child->free = 0;
-	child->size = size;
-	child->start = parent->start;
-	child->mm = parent->mm;
-
-	list_add_tail(&child->ml_entry, &parent->ml_entry);
-	INIT_LIST_HEAD(&child->fl_entry);
-
-	parent->size -= size;
-	parent->start += size;
-	return child;
-}
-
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
+					     unsigned long size,
+					     unsigned alignment,
+					     int atomic)
+{
+	struct drm_mm_node *node;
+
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
+
+	drm_mm_insert_helper(hole_node, node, size, alignment);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
+
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free(mm, size, alignment, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper(hole_node, node, size, alignment);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node);
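
drm_mm_insert_node() is the new embedded-node entry point: instead of the manager kmalloc'ing a struct drm_mm_node for you (the drm_mm_get_block path), the caller embeds the node in its own object and, per the comment above, must hand it in cleared. A hedged sketch of the pattern (the buffer struct is hypothetical):

	struct my_buffer {			/* hypothetical driver object */
		struct drm_mm_node mm_node;	/* embedded, no extra kmalloc */
		/* ... */
	};

	static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
				  unsigned long size, unsigned align)
	{
		/* Node must be zeroed before insertion. */
		memset(&buf->mm_node, 0, sizeof(buf->mm_node));
		return drm_mm_insert_node(mm, &buf->mm_node, size, align);
	}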
Line 193... Line 194...
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+				       struct drm_mm_node *node,
+				       unsigned long size, unsigned alignment,
+				       unsigned long start, unsigned long end)
+{
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
+	if (hole_start < start)
+		wasted += start - hole_start;
+	if (alignment)
+		tmp = (hole_start + wasted) % alignment;
+
+	if (tmp)
+		wasted += alignment - tmp;
+
+	if (!wasted) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
+	}
+
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
+	node->allocated = 1;
+
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > hole_end);
+	BUG_ON(node->start + node->size > end);
+
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
+	} else {
+		node->hole_follows = 0;
+	}
+}
+
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
-					     unsigned long size,
-					     unsigned alignment,
-					     int atomic)
-{
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
-
-	if (alignment)
-		tmp = node->start % alignment;
-
-	if (tmp) {
-		align_splitoff =
-		    drm_mm_split_at_start(node, alignment - tmp, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
-	}
-
-	if (node->size == size) {
-		list_del_init(&node->fl_entry);
-		node->free = 0;
-	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
-	}
-
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
-
-	return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
-
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
-						unsigned long size,
-						unsigned alignment,
-						unsigned long start,
-						unsigned long end,
-						int atomic)
-{
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
-	unsigned wasted = 0;
-
-	if (node->start < start)
-		wasted += start - node->start;
-	if (alignment)
-		tmp = ((node->start + wasted) % alignment);
-
-	if (tmp)
-		wasted += alignment - tmp;
-	if (wasted) {
-		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
-	}
-
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int atomic)
+{
+	struct drm_mm_node *node;
+
+	node = drm_mm_kmalloc(hole_node->mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
+
+	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+				   start, end);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
Line 250... Line -...
+
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
+ */
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
+{
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+						start, end, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+				   start, end);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+
-	if (node->size == size) {
-		list_del_init(&node->fl_entry);
-		node->free = 0;
-	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
-	}
-
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
-
-	return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
- */
-
-void drm_mm_put_block(struct drm_mm_node *cur)
-{
-
-	struct drm_mm *mm = cur->mm;
-	struct list_head *cur_head = &cur->ml_entry;
-	struct list_head *root_head = &mm->ml_entry;
-	struct drm_mm_node *prev_node = NULL;
-	struct drm_mm_node *next_node;
-
-	int merged = 0;
-
-	if (cur_head->prev != root_head) {
-		prev_node =
-		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
-		if (prev_node->free) {
-			prev_node->size += cur->size;
-			merged = 1;
-		}
-	}
-	if (cur_head->next != root_head) {
-		next_node =
-		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
-		if (next_node->free) {
-			if (merged) {
-				prev_node->size += next_node->size;
-				list_del(&next_node->ml_entry);
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
+
+	BUG_ON(node->scanned_block || node->scanned_prev_free
+				   || node->scanned_next_free);
+
+	prev_node =
+	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+	if (node->hole_follows) {
+		BUG_ON(drm_mm_hole_node_start(node)
+				== drm_mm_hole_node_end(node));
+		list_del(&node->hole_stack);
+	} else
+		BUG_ON(drm_mm_hole_node_start(node)
+				!= drm_mm_hole_node_end(node));
+
+	if (!prev_node->hole_follows) {
+		prev_node->hole_follows = 1;
+		list_add(&prev_node->hole_stack, &mm->hole_stack);
+	} else
+		list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+	list_del(&node->node_list);
+	node->allocated = 0;
+}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
Line 295... Line 318...
-				list_del(&next_node->fl_entry);
-				spin_lock(&mm->unused_lock);
-				if (mm->num_unused < MM_UNUSED_TARGET) {
-					list_add(&next_node->fl_entry,
-						 &mm->unused_nodes);
-					++mm->num_unused;
-				} else
-					kfree(next_node);
-				spin_unlock(&mm->unused_lock);
-			} else {
-				next_node->size += cur->size;
-				next_node->start = cur->start;
-				merged = 1;
-			}
-		}
-	}
-	if (!merged) {
-		cur->free = 1;
-		list_add(&cur->fl_entry, &mm->fl_entry);
-	} else {
-		list_del(&cur->ml_entry);
-		spin_lock(&mm->unused_lock);
-		if (mm->num_unused < MM_UNUSED_TARGET) {
-			list_add(&cur->fl_entry, &mm->unused_nodes);
-			++mm->num_unused;
-		} else
-			kfree(cur);
-		spin_unlock(&mm->unused_lock);
-	}
-}
-
-EXPORT_SYMBOL(drm_mm_put_block);
-
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+
+	drm_mm_remove_node(node);
+
+	spin_lock(&mm->unused_lock);
+	if (mm->num_unused < MM_UNUSED_TARGET) {
+		list_add(&node->node_list, &mm->unused_nodes);
+		++mm->num_unused;
+	} else
+		kfree(node);
+	spin_unlock(&mm->unused_lock);
+}
+EXPORT_SYMBOL(drm_mm_put_block);
+
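
Note how drm_mm_put_block() shrinks to drm_mm_remove_node() plus node recycling: all the hand-rolled merging of adjacent free blocks disappears because the hole bookkeeping absorbs it. For embedded nodes the pairing is simply insert/remove. A hedged sketch contrasting the two lifetimes (reusing the hypothetical my_buffer from the sketch above):

	static void lifetime_example(struct drm_mm *mm, struct drm_mm_node *hole,
				     struct my_buffer *buf)
	{
		struct drm_mm_node *node;

		/* Old style: the manager allocates the node... */
		node = drm_mm_get_block(hole, 4096, 0);
		if (node)
			drm_mm_put_block(node);	/* ...and frees/recycles it. */

		/* New embedded style: the caller owns the node's storage. */
		if (drm_mm_insert_node(mm, &buf->mm_node, 4096, 0) == 0)
			drm_mm_remove_node(&buf->mm_node); /* storage stays in buf */
	}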
Line 308... Line 335...
+static int check_free_hole(unsigned long start, unsigned long end,
+			   unsigned long size, unsigned alignment)
+{
+	unsigned wasted = 0;
+
+	if (end - start < size)
+		return 0;
+
+	if (alignment) {
+		unsigned tmp = start % alignment;
+		if (tmp)
+			wasted = alignment - tmp;
+	}
+
+	if (end >= start + size + wasted) {
+		return 1;
+	}
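
check_free_hole() factors out the fit test both search functions previously duplicated: a hole [start, end) fits a request if, after padding start up to the alignment, size bytes still remain. A worked example with invented numbers:

	start  = 0x1003, alignment = 0x100
	tmp    = 0x1003 % 0x100 = 0x03
	wasted = 0x100 - 0x03   = 0xFD
	fits  iff  end >= 0x1003 + size + 0xFD   (aligned block starts at 0x1100)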
 struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 				       unsigned long size,
 				       unsigned alignment, int best_match)
 {
Line 371... Line 391...
 						unsigned alignment,
 						unsigned long start,
 						unsigned long end,
 						int best_match)
 {
-	struct list_head *list;
-	const struct list_head *free_stack = &mm->fl_entry;
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
 	unsigned long best_size;
-	unsigned wasted;
+
+	BUG_ON(mm->scanned_blocks);
 
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each(list, free_stack) {
-		entry = list_entry(list, struct drm_mm_node, fl_entry);
-		wasted = 0;
-
-		if (entry->size < size)
-			continue;
-
-		if (entry->start > end || (entry->start+entry->size) < start)
-			continue;
-
-		if (entry->start < start)
-			wasted += start - entry->start;
-
-		if (alignment) {
-			register unsigned tmp = (entry->start + wasted) % alignment;
-			if (tmp)
-				wasted += alignment - tmp;
-		}
-
-		if (entry->size >= size + wasted &&
-		    (entry->start + wasted + size) <= end) {
-			if (!best_match)
-				return entry;
-			if (entry->size < best_size) {
-				best = entry;
-				best_size = entry->size;
-			}
-		}
+	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+			start : drm_mm_hole_node_start(entry);
+		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+			end : drm_mm_hole_node_end(entry);
+
+		BUG_ON(!entry->hole_follows);
+		if (!check_free_hole(adj_start, adj_end, size, alignment))
+			continue;
+
+		if (!best_match)
+			return entry;
+
+		if (entry->size < best_size) {
+			best = entry;
+			best_size = entry->size;
+		}
 	}
 
 	return best;
 }
 EXPORT_SYMBOL(drm_mm_search_free_in_range);
 
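The search now walks mm->hole_stack, which holds only nodes that actually have a gap behind them, instead of every free block; each hole is clamped to [start, end] and the fit test is delegated to check_free_hole(). With best_match = 0 the first fitting hole wins; with best_match = 1 the whole list is scanned and the tightest fit wins, trading a full walk for less fragmentation. A minimal sketch of the two call styles (size invented):

	struct drm_mm_node *pick_hole(struct drm_mm *mm, int frag_sensitive)
	{
		/* best_match = 1: scan every hole, return the tightest fit;
		 * best_match = 0: return the first hole that fits. */
		return drm_mm_search_free(mm, 4096, 0, frag_sensitive ? 1 : 0);
	}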
Line -... Line 422...
+/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+	list_replace(&old->node_list, &new->node_list);
+	list_replace(&old->hole_stack, &new->hole_stack);
+	new->hole_follows = old->hole_follows;
+	new->mm = old->mm;
+	new->start = old->start;
+	new->size = old->size;
+
+	old->allocated = 0;
+	new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+		      unsigned alignment)
+{
+	mm->scan_alignment = alignment;
+	mm->scan_size = size;
+	mm->scanned_blocks = 0;
+	mm->scan_hit_start = 0;
+	mm->scan_hit_size = 0;
+	mm->scan_check_range = 0;
+	mm->prev_scanned_node = NULL;
+}
+EXPORT_SYMBOL(drm_mm_init_scan);
+
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+				 unsigned alignment,
+				 unsigned long start,
+				 unsigned long end)
+{
+	mm->scan_alignment = alignment;
+	mm->scan_size = size;
+	mm->scanned_blocks = 0;
+	mm->scan_hit_start = 0;
+	mm->scan_hit_size = 0;
+	mm->scan_start = start;
+	mm->scan_end = end;
+	mm->scan_check_range = 1;
+	mm->prev_scanned_node = NULL;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
+
+/**
+ * Add a node to the scan list that might be freed to make space for the desired
+ * hole.
+ *
+ * Returns non-zero, if a hole has been found, zero otherwise.
+ */
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
+	unsigned long hole_start, hole_end;
+	unsigned long adj_start;
+	unsigned long adj_end;
+
+	mm->scanned_blocks++;
+
+	BUG_ON(node->scanned_block);
+	node->scanned_block = 1;
+
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
+
+	node->scanned_preceeds_hole = prev_node->hole_follows;
+	prev_node->hole_follows = 1;
+	list_del(&node->node_list);
+	node->node_list.prev = &prev_node->node_list;
+	node->node_list.next = &mm->prev_scanned_node->node_list;
+	mm->prev_scanned_node = node;
+
+	hole_start = drm_mm_hole_node_start(prev_node);
+	hole_end = drm_mm_hole_node_end(prev_node);
+	if (mm->scan_check_range) {
+		adj_start = hole_start < mm->scan_start ?
+			mm->scan_start : hole_start;
+		adj_end = hole_end > mm->scan_end ?
+			mm->scan_end : hole_end;
+	} else {
+		adj_start = hole_start;
+		adj_end = hole_end;
+	}
+
+	if (check_free_hole(adj_start, adj_end,
+			    mm->scan_size, mm->scan_alignment)) {
+		mm->scan_hit_start = hole_start;
+		mm->scan_hit_size = hole_end;
+
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_add_block);
+
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed in the exact same order from the scan list as they
+ * have been added, otherwise the internal state of the memory manager will be
+ * corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then return
+ * the just freed block (because it's at the top of the free_stack list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
+
+	mm->scanned_blocks--;
+
+	BUG_ON(!node->scanned_block);
+	node->scanned_block = 0;
+
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
+
+	prev_node->hole_follows = node->scanned_preceeds_hole;
+	INIT_LIST_HEAD(&node->node_list);
+	list_add(&node->node_list, &prev_node->node_list);
+
+	/* Only need to check for containment because start&size for the
+	 * complete resulting free block (not just the desired part) is
+	 * stored. */
+	if (node->start >= mm->scan_hit_start &&
+	    node->start + node->size <=
+	    		mm->scan_hit_start + mm->scan_hit_size) {
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_remove_block);
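
The scan API added here implements roster-style eviction: feed LRU blocks into the scan until drm_mm_scan_add_block() reports that the accumulated hole is big enough, then take every scanned block back out in exact reverse order, evicting only those for which drm_mm_scan_remove_block() returns 1. A hedged sketch of that protocol, modelled on how GPU drivers drive it (my_obj, lru_link, scan_link and my_obj_evict() are hypothetical):

	static int evict_for_hole(struct drm_mm *mm, struct list_head *lru,
				  unsigned long size, unsigned alignment)
	{
		struct my_obj *obj, *next;
		LIST_HEAD(unwind);
		LIST_HEAD(victims);
		int found = 0;

		drm_mm_init_scan(mm, size, alignment);

		/* Phase 1: add LRU blocks until the scan reports a hole. */
		list_for_each_entry(obj, lru, lru_link) {
			list_add(&obj->scan_link, &unwind);	/* LIFO */
			if (drm_mm_scan_add_block(&obj->mm_node)) {
				found = 1;
				break;
			}
		}

		/* Phase 2: unwind in exact reverse order of addition
		 * (the LIFO unwind list already is that order). */
		list_for_each_entry_safe(obj, next, &unwind, scan_link) {
			if (drm_mm_scan_remove_block(&obj->mm_node) && found)
				list_move(&obj->scan_link, &victims);
			else
				list_del(&obj->scan_link);
		}

		/* Scan list is empty again, so other operations are legal. */
		list_for_each_entry_safe(obj, next, &victims, scan_link) {
			list_del(&obj->scan_link);
			my_obj_evict(obj);  /* e.g. unbind + drm_mm_remove_node() */
		}

		return found ? 0 : -ENOSPC;
	}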
Line 420... Line 590...
 int drm_mm_clean(struct drm_mm * mm)
 {
-	struct list_head *head = &mm->ml_entry;
+	struct list_head *head = &mm->head_node.node_list;
 
 	return (head->next->next == head);
 }
 EXPORT_SYMBOL(drm_mm_clean);
Line -... Line 597...
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
-	INIT_LIST_HEAD(&mm->ml_entry);
-	INIT_LIST_HEAD(&mm->fl_entry);
+	INIT_LIST_HEAD(&mm->hole_stack);
 	INIT_LIST_HEAD(&mm->unused_nodes);
 	mm->num_unused = 0;
+	mm->scanned_blocks = 0;
 	spin_lock_init(&mm->unused_lock);
 
-	return drm_mm_create_tail_node(mm, start, size, 0);
+	/* Clever trick to avoid a special case in the free hole tracking. */
+	INIT_LIST_HEAD(&mm->head_node.node_list);
+	INIT_LIST_HEAD(&mm->head_node.hole_stack);
+	mm->head_node.hole_follows = 1;
+	mm->head_node.scanned_block = 0;
+	mm->head_node.scanned_prev_free = 0;
+	mm->head_node.scanned_next_free = 0;
+	mm->head_node.mm = mm;
+	mm->head_node.start = start + size;
+	mm->head_node.size = start - mm->head_node.start;
+	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
-	struct list_head *bnode = mm->fl_entry.next;
-	struct drm_mm_node *entry;
-	struct drm_mm_node *next;
-
-	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
-
-	if (entry->ml_entry.next != &mm->ml_entry ||
-	    entry->fl_entry.next != &mm->fl_entry) {
-		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
-		return;
-	}
+	struct drm_mm_node *entry, *next;
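
The "clever trick" comment in the new drm_mm_init() is worth spelling out. The embedded head_node is parked at the end of the managed range with a size that wraps around: head_node.start = start + size and head_node.size = start - (start + size), that is, -size in unsigned arithmetic. Plugging these values into the hole helpers gives

	drm_mm_hole_node_start(&mm->head_node)
		= head_node.start + head_node.size
		= (start + size) + (start - (start + size))
		= start

	drm_mm_hole_node_end(&mm->head_node)
		= next node's start		(list is circular: head_node itself)
		= start + size

so the initial hole behind the head node is exactly [start, start + size), the whole managed range, and neither a special first-allocation case nor the old drm_mm_create_tail_node() kmalloc is needed.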
	}
625