/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff, this is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as are
 * some basic allocator dumpers for debugging.
 */
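
/*
 * Example (illustrative sketch, not part of the original file): minimal
 * lifecycle of an allocator and one node. The 16 MiB range and 4 KiB
 * allocation size are assumptions for illustration only.
 */
static int __maybe_unused example_drm_mm_basic(void)
{
	struct drm_mm mm;
	struct drm_mm_node node;
	int ret;

	memset(&mm, 0, sizeof(mm));	/* drm_mm_init() expects a zeroed @mm */
	memset(&node, 0, sizeof(node));	/* insertion expects a zeroed node */
	drm_mm_init(&mm, 0, 16 * 1024 * 1024);

	ret = drm_mm_insert_node_generic(&mm, &node, 4096, 0, 0,
					 DRM_MM_SEARCH_DEFAULT,
					 DRM_MM_CREATE_DEFAULT);
	if (ret == 0)
		drm_mm_remove_node(&node);

	drm_mm_takedown(&mm);	/* the allocator must be clean by now */
	return ret;
}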

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long color,
						unsigned long start,
						unsigned long end,
						enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= tmp;
			else
				adj_start += alignment - tmp;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set-up
 * before the range allocator can be set-up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	unsigned long end = node->start + node->size;
	unsigned long hole_start;
	unsigned long hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
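
/*
 * Example (illustrative sketch, not part of the original file): reserving a
 * firmware-configured scanout buffer right after drm_mm_init(), before any
 * other allocations. The offset and size here are assumptions for
 * illustration only.
 */
static int __maybe_unused example_reserve_firmware_fb(struct drm_mm *mm,
						      struct drm_mm_node *fb_node)
{
	fb_node->start = 0;			/* assumed firmware placement */
	fb_node->size = 8 * 1024 * 1024;	/* assumed scanout size */
	fb_node->color = 0;

	/* Succeeds only while the exact range is still part of a free hole. */
	return drm_mm_reserve_node(mm, fb_node);
}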

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= tmp;
			else
				adj_start += alignment - tmp;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment,
					unsigned long color,
					unsigned long start, unsigned long end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
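
/*
 * Example (illustrative sketch, not part of the original file): a
 * range-restricted, top-down allocation. The 256 MiB window and the sizes
 * are assumptions for illustration only.
 */
static int __maybe_unused example_insert_mappable(struct drm_mm *mm,
						  struct drm_mm_node *node)
{
	/* A 64 KiB block, 4 KiB aligned, kept inside [0, 256 MiB) and placed
	 * at the top of its hole to keep the bottom of the window dense. */
	return drm_mm_insert_node_in_range_generic(mm, node,
						   64 * 1024, 4096, 0,
						   0, 256 * 1024 * 1024,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_TOP);
}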

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Check whether the hole [start, end) can hold @size bytes at the given
 * alignment, e.g. start = 0x1003 with alignment = 0x1000 is first rounded up
 * to 0x2000, which must still leave @size bytes before @end.
 */
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      unsigned long size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		unsigned long hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       unsigned long size,
							       unsigned alignment,
							       unsigned long color,
							       unsigned long start,
							       unsigned long end,
							       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long adj_start;
	unsigned long adj_end;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		unsigned long hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
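
/*
 * Example (illustrative sketch, not part of the original file): allocate
 * through a temporary node, then hand the allocation over to a node embedded
 * in a driver object. "struct example_slot" is an assumption standing in for
 * a driver's own structure.
 */
struct example_slot {
	struct drm_mm_node node;
};

static int __maybe_unused example_alloc_into_slot(struct drm_mm *mm,
						  struct example_slot *slot)
{
	struct drm_mm_node tmp;
	int ret;

	memset(&tmp, 0, sizeof(tmp));
	ret = drm_mm_insert_node_generic(mm, &tmp, 4096, 0, 0,
					 DRM_MM_SEARCH_DEFAULT,
					 DRM_MM_CREATE_DEFAULT);
	if (ret)
		return ret;

	/* @slot->node inherits tmp's exact placement; @tmp becomes free. */
	memset(&slot->node, 0, sizeof(slot->node));
	drm_mm_replace_node(&tmp, &slot->node);
	return 0;
}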

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select all objects from the tail of an LRU until there's
 * a suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the overall
 * complexity is O(scanned_objects). So like the free stack which needs to be
 * walked before a scan operation even begins this is linear in the number of
 * objects. It doesn't seem to hurt badly. A sketch of the full protocol
 * follows below.
 */
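
/*
 * Example (illustrative sketch, not part of the original file): the
 * scan/restore/evict protocol described above. "struct example_obj" and its
 * lists are assumptions standing in for a driver's own object type.
 */
struct example_obj {
	struct drm_mm_node node;
	struct list_head lru;
	struct list_head scan_link;
};

static bool __maybe_unused example_scan_for_hole(struct drm_mm *mm,
						 struct list_head *lru,
						 unsigned long size,
						 unsigned alignment)
{
	struct example_obj *obj;
	LIST_HEAD(unwind);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* Step 1: feed LRU objects into the scan until a hole turns up. */
	list_for_each_entry(obj, lru, lru) {
		list_add(&obj->scan_link, &unwind);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/*
	 * Step 2: remove the blocks in exactly the reverse order (the unwind
	 * list is prepended to, so walking it head-to-tail is reverse order)
	 * to restore the allocator state. drm_mm_scan_remove_block() reports
	 * which nodes overlap the hole that was found.
	 */
	list_for_each_entry(obj, &unwind, scan_link) {
		if (drm_mm_scan_remove_block(&obj->node)) {
			/* Step 3 (not shown): evict obj, then call
			 * drm_mm_remove_node(&obj->node). */
		}
	}

	return found;
}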

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. Note that there's no need to specify allocation flags, since they only
 * change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
				       const char *prefix)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	unsigned long hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif