/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway it is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */

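/*
 * Illustrative sketch, not part of this file: a driver typically embeds the
 * struct drm_mm_node in its own buffer object and asks drm_mm for a hole.
 * All names below (my_bo, my_bo_pin, vma_node) are hypothetical.
 *
 *	struct my_bo {
 *		struct drm_mm_node vma_node;
 *	};
 *
 *	static int my_bo_pin(struct drm_mm *mm, struct my_bo *bo, u64 size)
 *	{
 *		memset(&bo->vma_node, 0, sizeof(bo->vma_node));
 *		return drm_mm_insert_node_generic(mm, &bo->vma_node, size,
 *						  0, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 */
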
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set up
 * before the range allocator can be set up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

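/*
 * Illustrative sketch, not part of this file: reserving the range the
 * firmware is still scanning out from, so later allocations cannot land on
 * top of it. The names fb_node, fb_base and fb_size are hypothetical.
 *
 *	static int my_reserve_fw_fb(struct drm_mm *mm,
 *				    struct drm_mm_node *fb_node,
 *				    u64 fb_base, u64 fb_size)
 *	{
 *		memset(fb_node, 0, sizeof(*fb_node));
 *		fb_node->start = fb_base;
 *		fb_node->size = fb_size;
 *		return drm_mm_reserve_node(mm, fb_node);
 *	}
 */
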
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

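/*
 * Illustrative sketch, not part of this file: restricting an allocation to
 * the first 256 MiB of the managed range, e.g. for a buffer that a display
 * engine can only scan out from there. The function name is hypothetical.
 *
 *	static int my_alloc_low(struct drm_mm *mm, struct drm_mm_node *node,
 *				u64 size)
 *	{
 *		memset(node, 0, sizeof(*node));
 *		return drm_mm_insert_node_in_range_generic(mm, node, size,
 *							   0, 0,
 *							   0, 256ULL << 20,
 *							   DRM_MM_SEARCH_DEFAULT,
 *							   DRM_MM_CREATE_DEFAULT);
 *	}
 */
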
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

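/*
 * Worked example for the round-up above (illustrative values): with
 * start = 0x1003 and alignment = 0x1000, do_div() yields rem = 3, so start
 * is bumped by 0x1000 - 3 to the next aligned address 0x2000. The hole only
 * fits if end still leaves room for size bytes from there.
 */
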
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not very
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So, like the free stack that
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */

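/*
 * Illustrative sketch, not part of this file: the scan protocol described
 * above, driven from a hypothetical LRU list. The my_bo fields and the list
 * names (lru, scan_list, evict_list) are assumptions for illustration.
 *
 *	INIT_LIST_HEAD(&scan_list);
 *	drm_mm_init_scan(mm, size, 0, 0);
 *
 *	list_for_each_entry(bo, &lru, lru_link) {
 *		list_add(&bo->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&bo->vma_node))
 *			break;
 *	}
 *
 *	list_for_each_entry_safe(bo, next, &scan_list, scan_link) {
 *		if (drm_mm_scan_remove_block(&bo->vma_node))
 *			list_move(&bo->evict_link, &evict_list);
 *		list_del_init(&bo->scan_link);
 *	}
 *
 * Because list_add() prepends, walking scan_list front to back removes the
 * blocks in exactly the reverse order they were added, as required. The
 * driver then evicts everything left on evict_list and retries the
 * allocation.
 */
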
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
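	/* The head node is set up inverted: its start sits at the end of the
	 * managed range and its size wraps around, so the single hole
	 * spanning [start, start + size) appears to follow it like any
	 * other hole. */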
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

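/*
 * Illustrative sketch, not part of this file: the overall lifecycle of an
 * allocator instance. The 4 GiB range and the single 4 KiB node are
 * arbitrary example values.
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node;
 *
 *	memset(&mm, 0, sizeof(mm));
 *	drm_mm_init(&mm, 0, 4096ULL << 20);
 *
 *	memset(&node, 0, sizeof(node));
 *	if (drm_mm_insert_node_generic(&mm, &node, 4096, 0, 0,
 *				       DRM_MM_SEARCH_DEFAULT,
 *				       DRM_MM_CREATE_DEFAULT) == 0)
 *		drm_mm_remove_node(&node);
 *
 *	drm_mm_takedown(&mm);	// allocator must be clean at this point
 */
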
static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif