/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>

/**
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

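/*
 * struct ttm_range_manager - wraps the drm_mm range allocator.
 * @mm: drm_mm tracking the free and allocated page ranges of the memory type.
 * @lock: protects @mm against concurrent get_node/put_node calls.
 */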
struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};

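/*
 * Reserve a page range for @bo within [fpfn, lpfn).  drm_mm_pre_get()
 * preallocates allocator nodes outside the spinlock so the search/get
 * pair below never needs to allocate while the lock is held; another
 * thread may consume those spare nodes between pre_get and the locked
 * section, in which case the atomic get returns NULL and the loop
 * retries.  Running out of space is not an error here: we return 0
 * with mem->mm_node still NULL and let TTM try the next placement.
 */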
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	unsigned long lpfn;
	int ret;

	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;
	do {
		ret = drm_mm_pre_get(mm);
		if (unlikely(ret))
			return ret;

		spin_lock(&rman->lock);
		node = drm_mm_search_free_in_range(mm,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn, 1);
		if (unlikely(node == NULL)) {
			spin_unlock(&rman->lock);
			return 0;
		}
		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
						     mem->page_alignment,
						     placement->fpfn,
						     lpfn);
		spin_unlock(&rman->lock);
	} while (node == NULL);

	mem->mm_node = node;
	mem->start = node->start;
	return 0;
}

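/*
 * Return the range previously reserved by ttm_bo_man_get_node() to the
 * allocator.  Harmless if @mem never got a node (mm_node == NULL).
 */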
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_put_block(mem->mm_node);
		spin_unlock(&rman->lock);
		mem->mm_node = NULL;
	}
}

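/*
 * Create the private range manager for this memory type.  @p_size is the
 * size of the managed aperture in pages, so the drm_mm spans [0, p_size).
 */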
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	drm_mm_init(&rman->mm, 0, p_size);
	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}

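/*
 * Tear down the manager.  Refuses with -EBUSY while any range is still
 * allocated; on success both the drm_mm and the manager itself are freed.
 */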
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	spin_lock(&rman->lock);
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}

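/*
 * Dump the allocator's block table through drm_mm_debug_table(), taking
 * the lock so the listing is consistent.
 */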
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	spin_lock(&rman->lock);
	drm_mm_debug_table(&rman->mm, prefix);
	spin_unlock(&rman->lock);
}

const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
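
/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * picks this manager for a memory type from its init_mem_type hook, after
 * which TTM invokes the table above for all allocations in that domain:
 *
 *	case TTM_PL_VRAM:
 *		man->func = &ttm_bo_manager_func;
 *		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 *		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 *		man->default_caching = TTM_PL_FLAG_WC;
 *		break;
 */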