Subversion Repositories: KolibriOS

drm_mm.c, Rev 5060 -> Rev 6084. This revision converts the allocator's
sizes and offsets from unsigned long to u64; the newer revision is shown
below.

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström
 */

44
#include 
44
#include 
45
#include 
45
#include 
46
#include 
46
#include 
47
#include 
47
#include 
48
#include 
48
#include 
49
 
49
 
/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any allocations of its own, so
 * if drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means
 * free space searches are O(num_holes). Given all the fancy features drm_mm
 * supports something better would be fairly complex, and since gfx thrashing
 * is a fairly steep cliff it is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM
 * driver uses this to implement guard pages between incompatible caching
 * domains in the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 */

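/*
 * A minimal usage sketch, illustrative only and not part of the original
 * file; the driver-side variable names are made up, the calls are the
 * drm_mm API defined below:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mm, 0, 16 * 1024 * 1024);
 *	err = drm_mm_insert_node_generic(&mm, &node, 4096, 0, 0,
 *					 DRM_MM_SEARCH_DEFAULT,
 *					 DRM_MM_CREATE_DEFAULT);
 *	if (!err) {
 *		... use node.start as the allocated offset ...
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);
 */
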
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;
 
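		/*
		 * do_div() divides the u64 operand in place and returns
		 * the remainder, so this computes adj_start % alignment
		 * without needing a 64-bit modulo on 32-bit targets.
		 */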
		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set up before the range allocator can be set up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

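/*
 * Illustrative sketch, not part of the original file: taking over a
 * firmware-initialized scanout buffer (fb_base and fb_size are
 * hypothetical values reported by the firmware):
 *
 *	struct drm_mm_node fb_node = {};
 *
 *	fb_node.start = fb_base;
 *	fb_node.size = fb_size;
 *	if (drm_mm_reserve_node(&mm, &fb_node))
 *		... the range is already occupied, nothing was inserted ...
 */
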
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

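/*
 * Illustrative sketch, not part of the original file: allocate 16 KiB,
 * aligned to 4 KiB, restricted to the first megabyte of the managed
 * range (all values are made up):
 *
 *	err = drm_mm_insert_node_in_range_generic(&mm, &node,
 *						  16384, 4096, 0,
 *						  0, 1024 * 1024,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */
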
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of
 * remove and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not the
 * most efficient to simply start selecting objects from the tail of an LRU
 * until there's a suitable hole: especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we
 * evict lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the
 * reverse order to restore the allocator state. Note that while the
 * allocator is used in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So, like the free stack which
 * needs to be walked before a scan operation even begins, this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */

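/*
 * Illustrative eviction-loop sketch, not part of the original file; the
 * object type and LRU list are made up, the scan calls are the ones
 * defined below:
 *
 *	drm_mm_init_scan(&mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link)
 *		if (drm_mm_scan_add_block(&obj->node))
 *			break;
 *
 *	All added blocks must then be removed again in exactly the
 *	reverse order; drm_mm_scan_remove_block() returns true for the
 *	blocks that have to be evicted to produce the hole, and those
 *	can be freed once the scan list is empty.
 */
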
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will
 * be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will
 * then return the just freed block (because it's at the top of the
 * free_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
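	/* The head node is a sentinel placed at start + size whose size
	 * wraps around past zero, so the hole that follows it starts at
	 * @start and, while the allocator is empty, ends at start + size:
	 * the whole managed range is just an ordinary hole. */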
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif