Subversion Repositories: KolibriOS

Diff between Rev 4104 and Rev 5060
Line 45... Line 45...
 #include
 #include
 #include
 #include
Line 49... Line 49...
 
-#define MM_UNUSED_TARGET 4
+/**
+ * DOC: Overview
+ *
+ * drm_mm provides a simple range allocator. The drivers are free to use the
+ * resource allocator from the linux core if it suits them, the upside of drm_mm
+ * is that it's in the DRM core. Which means that it's easier to extend for
+ * some of the crazier special purpose needs of gpus.
+ *
+ * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
+ * Drivers are free to embed either of them into their own suitable
+ * datastructures. drm_mm itself will not do any allocations of its own, so if
+ * drivers choose not to embed nodes they need to still allocate them
+ * themselves.
+ *
+ * The range allocator also supports reservation of preallocated blocks. This is
+ * useful for taking over initial mode setting configurations from the firmware,
+ * where an object needs to be created which exactly matches the firmware's
+ * scanout target. As long as the range is still free it can be inserted anytime
+ * after the allocator is initialized, which helps with avoiding looped
+ * dependencies in the driver load sequence.
+ *
+ * drm_mm maintains a stack of most recently freed holes, which of all
+ * simplistic datastructures seems to be a fairly decent approach to clustering
+ * allocations and avoiding too much fragmentation. This means free space
+ * searches are O(num_holes). Given all the fancy features drm_mm supports,
+ * something better would be fairly complex, and since gfx thrashing is a fairly
+ * steep cliff it is not a real concern. Removing a node again is O(1).
+ *
+ * drm_mm supports a few features: Alignment and range restrictions can be
+ * supplied. Furthermore every &drm_mm_node has a color value (which is just an
+ * opaque unsigned long) which in conjunction with a driver callback can be used
+ * to implement sophisticated placement restrictions. The i915 DRM driver uses
+ * this to implement guard pages between incompatible caching domains in the
+ * graphics TT.
+ *
+ * Two behaviors are supported for searching and allocating: bottom-up and top-down.
+ * The default is bottom-up. Top-down allocation can be used if the memory area
+ * has different restrictions, or just to reduce fragmentation.
+ *
+ * Finally iteration helpers to walk all nodes and all holes are provided as are
+ * some basic allocator dumpers for debugging.
+ */
 
 static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 						unsigned long size,
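The embedding pattern the overview describes is easier to see in a short sketch. Everything named my_* below is hypothetical driver code, not part of this file; the drm_mm_* calls and DRM_MM_* flags match the API on the new side of this diff (the default flag values are assumed to be defined alongside the enums in drm_mm.h).

#include <linux/string.h>
#include <drm/drm_mm.h>

/* Both the allocator and the nodes live inside driver structures, so
 * drm_mm itself never allocates memory of its own. */
struct my_manager {
	struct drm_mm mm;
};

struct my_object {
	struct drm_mm_node node;
};

static int my_alloc(struct my_manager *mgr, struct my_object *obj,
		    unsigned long size)
{
	/* the node must be cleared to 0 before it is handed to drm_mm */
	memset(&obj->node, 0, sizeof(obj->node));
	return drm_mm_insert_node_generic(&mgr->mm, &obj->node, size,
					  0 /* alignment */, 0 /* color */,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}

static void my_free(struct my_object *obj)
{
	drm_mm_remove_node(&obj->node);	/* O(1); node is reusable as-is */
}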
Line 63... Line 104...
 						enum drm_mm_search_flags flags);
Line 64... Line 105...
 
 static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 				 struct drm_mm_node *node,
 				 unsigned long size, unsigned alignment,
-				 unsigned long color)
+				 unsigned long color,
+				 enum drm_mm_allocator_flags flags)
 {
 	struct drm_mm *mm = hole_node->mm;
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
Line 76... Line 118...
 	BUG_ON(node->allocated);
Line 77... Line 119...
 
 	if (mm->color_adjust)
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
+	if (flags & DRM_MM_CREATE_TOP)
+		adj_start = adj_end - size;
+
 	if (alignment) {
 		unsigned tmp = adj_start % alignment;
-		if (tmp)
+		if (tmp) {
+			if (flags & DRM_MM_CREATE_TOP)
+				adj_start -= tmp;
+			else
 			adj_start += alignment - tmp;
+		}
 	}
 
+	BUG_ON(adj_start < hole_start);
+	BUG_ON(adj_end > hole_end);
+
 	if (adj_start == hole_start) {
Line 105... Line 157...
 		list_add(&node->hole_stack, &mm->hole_stack);
 		node->hole_follows = 1;
 	}
 }
Line -... Line 161...
 
+/**
+ * drm_mm_reserve_node - insert a pre-initialized node
+ * @mm: drm_mm allocator to insert @node into
+ * @node: drm_mm_node to insert
+ *
+ * This function inserts an already set-up drm_mm_node into the allocator,
+ * meaning that start, size and color must be set by the caller. This is useful
+ * to initialize the allocator with preallocated objects which must be set up
+ * before the range allocator can be set up, e.g. when taking over a firmware
+ * framebuffer.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no hole where @node is.
+ */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
 	struct drm_mm_node *hole;
 	unsigned long end = node->start + node->size;
Line 139... Line 205...
 		}
 
 		return 0;
Line 142... Line -...
 	}
 
-	WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
-	     node->start, node->size);
 	return -ENOSPC;
Line 147... Line 211...
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
 /**
- * Search for free space and insert a preallocated memory node. Returns
- * -ENOSPC if no suitable free area is available. The preallocated memory node
- * must be cleared.
+ * drm_mm_insert_node_generic - search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 			       unsigned long size, unsigned alignment,
 			       unsigned long color,
-			       enum drm_mm_search_flags flags)
+			       enum drm_mm_search_flags sflags,
+			       enum drm_mm_allocator_flags aflags)
 {
 	struct drm_mm_node *hole_node;
 
 	hole_node = drm_mm_search_free_generic(mm, size, alignment,
-					       color, flags);
+					       color, sflags);
 	if (!hole_node)
 		return -ENOSPC;
 
-	drm_mm_insert_helper(hole_node, node, size, alignment, color);
+	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_generic);
 
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 				       struct drm_mm_node *node,
 				       unsigned long size, unsigned alignment,
 				       unsigned long color,
 				       unsigned long start, unsigned long end,
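Together with drm_mm_reserve_node() above, the firmware-framebuffer takeover mentioned in the overview looks roughly like this. A hedged sketch, continuing the hypothetical my_* driver; fb_base and fb_size stand for values read from the inherited scanout configuration.

/* Hypothetical: pin down the region the firmware is already scanning out
 * from, before any other allocation can land on top of it. */
static int my_reserve_firmware_fb(struct my_manager *mgr, struct my_object *fb,
				  unsigned long fb_base, unsigned long fb_size)
{
	memset(&fb->node, 0, sizeof(fb->node));
	fb->node.start = fb_base;	/* caller supplies start, size, color */
	fb->node.size  = fb_size;
	fb->node.color = 0;

	/* fails with -ENOSPC if something already overlaps the range */
	return drm_mm_reserve_node(&mgr->mm, &fb->node);
}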
Line 186... Line 262...
 	if (adj_start < start)
 		adj_start = start;
 	if (adj_end > end)
 		adj_end = end;
 
+	if (flags & DRM_MM_CREATE_TOP)
+		adj_start = adj_end - size;
+
 	if (mm->color_adjust)
 		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
 	if (alignment) {
 		unsigned tmp = adj_start % alignment;
-		if (tmp)
+		if (tmp) {
+			if (flags & DRM_MM_CREATE_TOP)
+				adj_start -= tmp;
+			else
 			adj_start += alignment - tmp;
+		}
 	}
 
 	if (adj_start == hole_start) {
Line 209... Line 292...
 	node->allocated = 1;
Line 210... Line 293...
 
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
 
+	BUG_ON(node->start < start);
+	BUG_ON(node->start < adj_start);
 	BUG_ON(node->start + node->size > adj_end);
 	BUG_ON(node->start + node->size > end);
Line 220... Line 305...
 		node->hole_follows = 1;
 	}
 }
Line 223... Line 308...
 
 /**
- * Search for free space and insert a preallocated memory node. Returns
- * -ENOSPC if no suitable free area is available. This is for range
- * restricted allocations. The preallocated memory node must be cleared.
+ * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @start: start of the allowed range for this node
+ * @end: end of the allowed range for this node
+ * @sflags: flags to fine-tune the allocation search
+ * @aflags: flags to fine-tune the allocation behavior
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
  */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-					unsigned long size, unsigned alignment, unsigned long color,
+					unsigned long size, unsigned alignment,
+					unsigned long color,
 					unsigned long start, unsigned long end,
-					enum drm_mm_search_flags flags)
+					enum drm_mm_search_flags sflags,
+					enum drm_mm_allocator_flags aflags)
 {
Line 234... Line 333...
 	struct drm_mm_node *hole_node;
 
 	hole_node = drm_mm_search_free_in_range_generic(mm,
 							size, alignment, color,
-							start, end, flags);
+							start, end, sflags);
 	if (!hole_node)
 		return -ENOSPC;
 
 	drm_mm_insert_helper_range(hole_node, node,
 				   size, alignment, color,
-				   start, end);
+				   start, end, aflags);
Line 245... Line 344...
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
 /**
- * Remove a memory node from the allocator.
+ * drm_mm_remove_node - Remove a memory node from the allocator.
+ * @node: drm_mm_node to remove
+ *
+ * This just removes a node from its drm_mm allocator. The node does not need to
+ * be cleared again before it can be re-inserted into this or any other drm_mm
+ * allocator. It is a bug to call this function on an un-allocated node.
  */
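A sketch of how the sflags/aflags pair might be combined in the ranged variant documented above; mappable_end and the my_* names are hypothetical, continuing the earlier sketches.

/* Hypothetical: place a buffer inside the CPU-mappable part of the range,
 * allocating top-down to keep the low addresses unfragmented. */
static int my_pin_mappable(struct my_manager *mgr, struct my_object *obj,
			   unsigned long size, unsigned long mappable_end)
{
	memset(&obj->node, 0, sizeof(obj->node));
	return drm_mm_insert_node_in_range_generic(&mgr->mm, &obj->node,
						   size, PAGE_SIZE, 0,
						   0, mappable_end,
						   DRM_MM_SEARCH_BELOW,
						   DRM_MM_CREATE_TOP);
}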
Line 313... Line 417...
 	BUG_ON(mm->scanned_blocks);
Line 314... Line 418...
 
 	best = NULL;
Line 316... Line 420...
 	best_size = ~0UL;
 
-	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+			       flags & DRM_MM_SEARCH_BELOW) {
+		unsigned long hole_size = adj_end - adj_start;
+
 		if (mm->color_adjust) {
 			mm->color_adjust(entry, color, &adj_start, &adj_end);
 			if (adj_end <= adj_start)
Line 326... Line 433...
 			continue;
Line 327... Line 434...
 
 		if (!(flags & DRM_MM_SEARCH_BEST))
Line 329... Line 436...
 			return entry;
 
-		if (entry->size < best_size) {
+		if (hole_size < best_size) {
 			best = entry;
-			best_size = entry->size;
+			best_size = hole_size;
Line 334... Line 441...
 		}
 	}
Line 354... Line 461...
 	BUG_ON(mm->scanned_blocks);
Line 355... Line 462...
 
 	best = NULL;
Line 357... Line 464...
 	best_size = ~0UL;
 
-	drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+			       flags & DRM_MM_SEARCH_BELOW) {
+		unsigned long hole_size = adj_end - adj_start;
+
 		if (adj_start < start)
 			adj_start = start;
Line 372... Line 482...
 			continue;
Line 373... Line 483...
 
 		if (!(flags & DRM_MM_SEARCH_BEST))
Line 375... Line 485...
 			return entry;
 
-		if (entry->size < best_size) {
+		if (hole_size < best_size) {
 			best = entry;
-			best_size = entry->size;
+			best_size = hole_size;
Line 380... Line 490...
 		}
 	}
Line 382... Line 492...
 
 	return best;
 }
 
 /**
- * Moves an allocation. To be used with embedded struct drm_mm_node.
+ * drm_mm_replace_node - move an allocation from @old to @new
+ * @old: drm_mm_node to remove from the allocator
+ * @new: drm_mm_node which should inherit @old's allocation
+ *
+ * This is useful when drivers embed the drm_mm_node structure and hence
+ * can't move allocations by reassigning pointers. It's a combination of remove
+ * and insert with the guarantee that the allocation start will match.
  */
Line 400... Line 516...
 	new->allocated = 1;
 }
 EXPORT_SYMBOL(drm_mm_replace_node);
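One plausible use of the helper documented above, continuing the hypothetical my_* driver: handing a placement from one embedded node to another without a free/allocate cycle, so the start offset (and anything mapped at it) stays stable.

static void my_transfer_placement(struct my_object *old_obj,
				  struct my_object *new_obj)
{
	drm_mm_replace_node(&old_obj->node, &new_obj->node);
	/* old_obj->node is now unallocated; new_obj->node inherited the
	 * exact start, size and color old_obj->node had. */
}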
Line 403... Line 519...
 
+/**
+ * DOC: lru scan roster
+ *
+ * Very often GPUs need to have continuous allocations for a given object. When
+ * evicting objects to make space for a new one it is therefore not most
+ * efficient when we simply start to select all objects from the tail of an LRU
+ * until there's a suitable hole: Especially for big objects or nodes that
+ * otherwise have special allocation constraints there's a good chance we evict
+ * lots of (smaller) objects unnecessarily.
+ *
+ * The DRM range allocator supports this use-case through the scanning
+ * interfaces. First a scan operation needs to be initialized with
+ * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
+ * objects to the roster (probably by walking an LRU list, but this can be
+ * freely implemented) until a suitable hole is found or there's no further
+ * evictable object.
+ *
+ * Then the driver must walk through all objects again in exactly the reverse
+ * order to restore the allocator state. Note that while the allocator is used
+ * in the scan mode no other operation is allowed.
+ *
+ * Finally the driver evicts all objects selected in the scan. Adding and
+ * removing an object is O(1), and since freeing a node is also O(1) the overall
+ * complexity is O(scanned_objects). So like the free stack which needs to be
+ * walked before a scan operation even begins, this is linear in the number of
+ * objects. It doesn't seem to hurt badly.
+ */
+
 /**
- * Initializa lru scanning.
+ * drm_mm_init_scan - initialize lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
  *
  * This simply sets up the scanning routines with the parameters for the desired
- * hole.
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
  *
- * Warning: As long as the scan list is non-empty, no other operations than
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
  * adding/removing nodes to/from the scan list are allowed.
  */
 void drm_mm_init_scan(struct drm_mm *mm,
 		      unsigned long size,
Line 425... Line 575...
 	mm->prev_scanned_node = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init_scan);
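The roster description above condenses into the following eviction loop, in the spirit of what the i915 driver does. A sketch only: the my_* structures, the lru list and my_unbind() are hypothetical, and my_object is assumed to carry an extra scan_link list head (initialized at object creation) for the roster walk.

static int my_evict_something(struct my_manager *mgr, struct list_head *lru,
			      unsigned long size, unsigned alignment)
{
	struct my_object *obj, *tmp;
	LIST_HEAD(scan_list);
	bool found = false;

	drm_mm_init_scan(&mgr->mm, size, alignment, 0);

	/* Phase 1: feed objects from the LRU into the roster until the
	 * combined holes would fit the requested allocation. */
	list_for_each_entry(obj, lru, lru_link) {
		/* list_move() adds at the head, so iterating scan_list
		 * forward later visits blocks in reverse order of addition,
		 * exactly as the scan interface requires. */
		list_move(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/* Phase 2: every added block must be removed again; keep on the
	 * list only those the scan actually selected for eviction. */
	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
		if (!drm_mm_scan_remove_block(&obj->node))
			list_del_init(&obj->scan_link);
	}

	if (!found)
		return -ENOSPC;

	/* Phase 3: evict the survivors; freeing their nodes opens the hole. */
	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
		my_unbind(obj);			/* hypothetical: removes obj->node */
		list_del_init(&obj->scan_link);
	}

	return 0;
}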
Line 428... Line 578...
 
 /**
- * Initializa lru scanning.
+ * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
+ * @start: start of the allowed range for the allocation
+ * @end: end of the allowed range for the allocation
  *
  * This simply sets up the scanning routines with the parameters for the desired
- * hole. This version is for range-restricted scans.
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
  *
- * Warning: As long as the scan list is non-empty, no other operations than
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
  * adding/removing nodes to/from the scan list are allowed.
  */
 void drm_mm_init_scan_with_range(struct drm_mm *mm,
 				 unsigned long size,
Line 454... Line 612...
 	mm->prev_scanned_node = NULL;
 }
 EXPORT_SYMBOL(drm_mm_init_scan_with_range);
Line 457... Line 615...
 
 /**
+ * drm_mm_scan_add_block - add a node to the scan list
+ * @node: drm_mm_node to add
+ *
  * Add a node to the scan list that might be freed to make space for the desired
  * hole.
  *
- * Returns non-zero, if a hole has been found, zero otherwise.
+ * Returns:
+ * True if a hole has been found, false otherwise.
  */
-int drm_mm_scan_add_block(struct drm_mm_node *node)
+bool drm_mm_scan_add_block(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 	unsigned long hole_start, hole_end;
Line 499... Line 661...
 
 	if (check_free_hole(adj_start, adj_end,
 			    mm->scan_size, mm->scan_alignment)) {
 		mm->scan_hit_start = hole_start;
 		mm->scan_hit_end = hole_end;
-		return 1;
+		return true;
Line 505... Line 667...
 	}
 
-	return 0;
+	return false;
Line 508... Line 670...
 }
 EXPORT_SYMBOL(drm_mm_scan_add_block);
 
 /**
- * Remove a node from the scan list.
+ * drm_mm_scan_remove_block - remove a node from the scan list
+ * @node: drm_mm_node to remove
  *
  * Nodes _must_ be removed in the exact same order from the scan list as they
  * have been added, otherwise the internal state of the memory manager will be
  * corrupted.
  *
  * When the scan list is empty, the selected memory nodes can be freed. An
  * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
- * return the just freed block (because its at the top of the free_stack list).
+ * return the just freed block (because it's at the top of the free_stack list).
  *
- * Returns one if this block should be evicted, zero otherwise. Will always
- * return zero when no hole has been found.
+ * Returns:
+ * True if this block should be evicted, false otherwise. Will always
+ * return false when no hole has been found.
  */
-int drm_mm_scan_remove_block(struct drm_mm_node *node)
+bool drm_mm_scan_remove_block(struct drm_mm_node *node)
Line 526... Line 690...
 {
Line 541... Line 705...
 	 return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
 		 node->start < mm->scan_hit_end);
 }
 EXPORT_SYMBOL(drm_mm_scan_remove_block);
Line -... Line 709...
 
+/**
+ * drm_mm_clean - checks whether an allocator is clean
+ * @mm: drm_mm allocator to check
+ *
+ * Returns:
+ * True if the allocator is completely free, false if there's still a node
+ * allocated in it.
+ */
-int drm_mm_clean(struct drm_mm * mm)
+bool drm_mm_clean(struct drm_mm * mm)
 {
Line 548... Line 720...
 	struct list_head *head = &mm->head_node.node_list;
 
 	return (head->next->next == head);
Line -... Line 723...
 }
 EXPORT_SYMBOL(drm_mm_clean);
 
+/**
+ * drm_mm_init - initialize a drm-mm allocator
+ * @mm: the drm_mm structure to initialize
+ * @start: start of the range managed by @mm
+ * @size: size of the range managed by @mm
+ *
+ * Note that @mm must be cleared to 0 before calling this function.
+ */
 void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
Line 570... Line 750...
 
 	mm->color_adjust = NULL;
 }
Line -... Line 753...
 EXPORT_SYMBOL(drm_mm_init);
 
+/**
+ * drm_mm_takedown - clean up a drm_mm allocator
+ * @mm: drm_mm allocator to clean up
+ *
+ * Note that it is a bug to call this function on an allocator which is not
+ * clean.
+ */
 void drm_mm_takedown(struct drm_mm * mm)
 {
 	WARN(!list_empty(&mm->head_node.node_list),
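The matching shutdown path for the two functions above, again as a hypothetical my_* sketch:

static void my_manager_fini(struct my_manager *mgr)
{
	/* all nodes must have been removed first; taking down a non-clean
	 * allocator is a bug and trips the WARN in drm_mm_takedown() */
	WARN_ON(!drm_mm_clean(&mgr->mm));
	drm_mm_takedown(&mgr->mm);
}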
Line 595... Line 782...
 	}
 
 	return 0;
Line -... Line 785...
 }
 
+/**
+ * drm_mm_debug_table - dump allocator state to dmesg
+ * @mm: drm_mm allocator to dump
+ * @prefix: prefix to use for dumping to dmesg
+ */
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
633
	}
825
	}
Line 634... Line 826...
634
 
826
 
635
	return 0;
827
	return 0;
Line -... Line 828...
-
 
828
}
-
 
829
 
-
 
830
/**
-
 
831
 * drm_mm_dump_table - dump allocator state to a seq_file
-
 
832
 * @m: seq_file to dump to
636
}
833
 * @mm: drm_mm allocator to dump
637
 
834
 */
638
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
835
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
639
{
836
{