/*
 * Provenance: imported from the KolibriOS Subversion repository
 * (diff view rev 4103 -> rev 4559 of drm_vma_manager.h); the web
 * viewer chrome that preceded this file has been folded into this
 * comment.
 */
1
#ifndef __DRM_VMA_MANAGER_H__
1
#ifndef __DRM_VMA_MANAGER_H__
2
#define __DRM_VMA_MANAGER_H__
2
#define __DRM_VMA_MANAGER_H__
3
 
3
 
4
/*
4
/*
5
 * Copyright (c) 2013 David Herrmann 
5
 * Copyright (c) 2013 David Herrmann 
6
 *
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a
7
 * Permission is hereby granted, free of charge, to any person obtaining a
8
 * copy of this software and associated documentation files (the "Software"),
8
 * copy of this software and associated documentation files (the "Software"),
9
 * to deal in the Software without restriction, including without limitation
9
 * to deal in the Software without restriction, including without limitation
10
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11
 * and/or sell copies of the Software, and to permit persons to whom the
11
 * and/or sell copies of the Software, and to permit persons to whom the
12
 * Software is furnished to do so, subject to the following conditions:
12
 * Software is furnished to do so, subject to the following conditions:
13
 *
13
 *
14
 * The above copyright notice and this permission notice shall be included in
14
 * The above copyright notice and this permission notice shall be included in
15
 * all copies or substantial portions of the Software.
15
 * all copies or substantial portions of the Software.
16
 *
16
 *
17
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23
 * OTHER DEALINGS IN THE SOFTWARE.
23
 * OTHER DEALINGS IN THE SOFTWARE.
24
 */
24
 */
25
 
25
 
26
#include 
26
#include 
27
//#include 
27
#include 
28
#include 
28
#include 
29
#include 
29
#include 
30
#include 
30
#include 
31
#include 
31
#include 
32
#include 
32
#include 
33
 
33
 
34
struct drm_vma_offset_file {
34
struct drm_vma_offset_file {
35
	struct rb_node vm_rb;
35
	struct rb_node vm_rb;
36
	struct file *vm_filp;
36
	struct file *vm_filp;
37
	unsigned long vm_count;
37
	unsigned long vm_count;
38
};
38
};
39
 
39
 
40
struct drm_vma_offset_node {
40
struct drm_vma_offset_node {
41
	rwlock_t vm_lock;
41
	rwlock_t vm_lock;
42
	struct drm_mm_node vm_node;
42
	struct drm_mm_node vm_node;
43
	struct rb_node vm_rb;
43
	struct rb_node vm_rb;
44
	struct rb_root vm_files;
44
	struct rb_root vm_files;
45
};
45
};
46
 
46
 
47
struct drm_vma_offset_manager {
47
struct drm_vma_offset_manager {
48
	rwlock_t vm_lock;
48
	rwlock_t vm_lock;
49
	struct rb_root vm_addr_space_rb;
49
	struct rb_root vm_addr_space_rb;
50
	struct drm_mm vm_addr_space_mm;
50
	struct drm_mm vm_addr_space_mm;
51
};
51
};
52
 
52
 
53
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
53
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
54
				 unsigned long page_offset, unsigned long size);
54
				 unsigned long page_offset, unsigned long size);
55
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
55
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
56
 
56
 
57
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
57
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
58
						  unsigned long start,
58
						  unsigned long start,
59
						  unsigned long pages);
59
						  unsigned long pages);
60
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
60
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
61
							   unsigned long start,
61
							   unsigned long start,
62
							   unsigned long pages);
62
							   unsigned long pages);
63
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
63
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
64
		       struct drm_vma_offset_node *node, unsigned long pages);
64
		       struct drm_vma_offset_node *node, unsigned long pages);
65
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
65
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
66
			   struct drm_vma_offset_node *node);
66
			   struct drm_vma_offset_node *node);
67
 
67
 
68
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
68
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
69
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
69
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
70
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
70
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
71
			     struct file *filp);
71
			     struct file *filp);
72
 
72
 
73
/**
73
/**
74
 * drm_vma_offset_exact_lookup() - Look up node by exact address
74
 * drm_vma_offset_exact_lookup() - Look up node by exact address
75
 * @mgr: Manager object
75
 * @mgr: Manager object
76
 * @start: Start address (page-based, not byte-based)
76
 * @start: Start address (page-based, not byte-based)
77
 * @pages: Size of object (page-based)
77
 * @pages: Size of object (page-based)
78
 *
78
 *
79
 * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
79
 * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
80
 * It only returns the exact object with the given start address.
80
 * It only returns the exact object with the given start address.
81
 *
81
 *
82
 * RETURNS:
82
 * RETURNS:
83
 * Node at exact start address @start.
83
 * Node at exact start address @start.
84
 */
84
 */
85
static inline struct drm_vma_offset_node *
85
static inline struct drm_vma_offset_node *
86
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
86
drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
87
			    unsigned long start,
87
			    unsigned long start,
88
			    unsigned long pages)
88
			    unsigned long pages)
89
{
89
{
90
	struct drm_vma_offset_node *node;
90
	struct drm_vma_offset_node *node;
91
 
91
 
92
	node = drm_vma_offset_lookup(mgr, start, pages);
92
	node = drm_vma_offset_lookup(mgr, start, pages);
93
	return (node && node->vm_node.start == start) ? node : NULL;
93
	return (node && node->vm_node.start == start) ? node : NULL;
94
}
94
}
95
 
95
 
96
/**
96
/**
97
 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
97
 * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
98
 * @mgr: Manager object
98
 * @mgr: Manager object
99
 *
99
 *
100
 * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
100
 * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
101
 * are allowed while holding this lock. All other contexts are blocked from VMA
101
 * are allowed while holding this lock. All other contexts are blocked from VMA
102
 * until the lock is released via drm_vma_offset_unlock_lookup().
102
 * until the lock is released via drm_vma_offset_unlock_lookup().
103
 *
103
 *
104
 * Use this if you need to take a reference to the objects returned by
104
 * Use this if you need to take a reference to the objects returned by
105
 * drm_vma_offset_lookup_locked() before releasing this lock again.
105
 * drm_vma_offset_lookup_locked() before releasing this lock again.
106
 *
106
 *
107
 * This lock must not be used for anything else than extended lookups. You must
107
 * This lock must not be used for anything else than extended lookups. You must
108
 * not call any other VMA helpers while holding this lock.
108
 * not call any other VMA helpers while holding this lock.
109
 *
109
 *
110
 * Note: You're in atomic-context while holding this lock!
110
 * Note: You're in atomic-context while holding this lock!
111
 *
111
 *
112
 * Example:
112
 * Example:
113
 *   drm_vma_offset_lock_lookup(mgr);
113
 *   drm_vma_offset_lock_lookup(mgr);
114
 *   node = drm_vma_offset_lookup_locked(mgr);
114
 *   node = drm_vma_offset_lookup_locked(mgr);
115
 *   if (node)
115
 *   if (node)
116
 *       kref_get_unless_zero(container_of(node, sth, entr));
116
 *       kref_get_unless_zero(container_of(node, sth, entr));
117
 *   drm_vma_offset_unlock_lookup(mgr);
117
 *   drm_vma_offset_unlock_lookup(mgr);
118
 */
118
 */
119
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
119
static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
120
{
120
{
121
	read_lock(&mgr->vm_lock);
121
	read_lock(&mgr->vm_lock);
122
}
122
}
123
 
123
 
124
/**
124
/**
125
 * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
125
 * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
126
 * @mgr: Manager object
126
 * @mgr: Manager object
127
 *
127
 *
128
 * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
128
 * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
129
 */
129
 */
130
static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
130
static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
131
{
131
{
132
	read_unlock(&mgr->vm_lock);
132
	read_unlock(&mgr->vm_lock);
133
}
133
}
134
 
134
 
135
/**
135
/**
136
 * drm_vma_node_reset() - Initialize or reset node object
136
 * drm_vma_node_reset() - Initialize or reset node object
137
 * @node: Node to initialize or reset
137
 * @node: Node to initialize or reset
138
 *
138
 *
139
 * Reset a node to its initial state. This must be called before using it with
139
 * Reset a node to its initial state. This must be called before using it with
140
 * any VMA offset manager.
140
 * any VMA offset manager.
141
 *
141
 *
142
 * This must not be called on an already allocated node, or you will leak
142
 * This must not be called on an already allocated node, or you will leak
143
 * memory.
143
 * memory.
144
 */
144
 */
145
static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
145
static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
146
{
146
{
147
	memset(node, 0, sizeof(*node));
147
	memset(node, 0, sizeof(*node));
148
	node->vm_files = RB_ROOT;
148
	node->vm_files = RB_ROOT;
149
	rwlock_init(&node->vm_lock);
149
	rwlock_init(&node->vm_lock);
150
}
150
}
151
 
151
 
152
/**
152
/**
153
 * drm_vma_node_start() - Return start address for page-based addressing
153
 * drm_vma_node_start() - Return start address for page-based addressing
154
 * @node: Node to inspect
154
 * @node: Node to inspect
155
 *
155
 *
156
 * Return the start address of the given node. This can be used as offset into
156
 * Return the start address of the given node. This can be used as offset into
157
 * the linear VM space that is provided by the VMA offset manager. Note that
157
 * the linear VM space that is provided by the VMA offset manager. Note that
158
 * this can only be used for page-based addressing. If you need a proper offset
158
 * this can only be used for page-based addressing. If you need a proper offset
159
 * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
159
 * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
160
 * drm_vma_node_offset_addr() helper instead.
160
 * drm_vma_node_offset_addr() helper instead.
161
 *
161
 *
162
 * RETURNS:
162
 * RETURNS:
163
 * Start address of @node for page-based addressing. 0 if the node does not
163
 * Start address of @node for page-based addressing. 0 if the node does not
164
 * have an offset allocated.
164
 * have an offset allocated.
165
 */
165
 */
166
static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
166
static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
167
{
167
{
168
	return node->vm_node.start;
168
	return node->vm_node.start;
169
}
169
}
170
 
170
 
171
/**
171
/**
172
 * drm_vma_node_size() - Return size (page-based)
172
 * drm_vma_node_size() - Return size (page-based)
173
 * @node: Node to inspect
173
 * @node: Node to inspect
174
 *
174
 *
175
 * Return the size as number of pages for the given node. This is the same size
175
 * Return the size as number of pages for the given node. This is the same size
176
 * that was passed to drm_vma_offset_add(). If no offset is allocated for the
176
 * that was passed to drm_vma_offset_add(). If no offset is allocated for the
177
 * node, this is 0.
177
 * node, this is 0.
178
 *
178
 *
179
 * RETURNS:
179
 * RETURNS:
180
 * Size of @node as number of pages. 0 if the node does not have an offset
180
 * Size of @node as number of pages. 0 if the node does not have an offset
181
 * allocated.
181
 * allocated.
182
 */
182
 */
183
static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
183
static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
184
{
184
{
185
	return node->vm_node.size;
185
	return node->vm_node.size;
186
}
186
}
187
 
187
 
188
/**
188
/**
189
 * drm_vma_node_has_offset() - Check whether node is added to offset manager
189
 * drm_vma_node_has_offset() - Check whether node is added to offset manager
190
 * @node: Node to be checked
190
 * @node: Node to be checked
191
 *
191
 *
192
 * RETURNS:
192
 * RETURNS:
193
 * true iff the node was previously allocated an offset and added to
193
 * true iff the node was previously allocated an offset and added to
194
 * an vma offset manager.
194
 * an vma offset manager.
195
 */
195
 */
196
static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
196
static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
197
{
197
{
198
	return drm_mm_node_allocated(&node->vm_node);
198
	return drm_mm_node_allocated(&node->vm_node);
199
}
199
}
200
 
200
 
201
/**
201
/**
202
 * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
202
 * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
203
 * @node: Linked offset node
203
 * @node: Linked offset node
204
 *
204
 *
205
 * Same as drm_vma_node_start() but returns the address as a valid offset that
205
 * Same as drm_vma_node_start() but returns the address as a valid offset that
206
 * can be used for user-space mappings during mmap().
206
 * can be used for user-space mappings during mmap().
207
 * This must not be called on unlinked nodes.
207
 * This must not be called on unlinked nodes.
208
 *
208
 *
209
 * RETURNS:
209
 * RETURNS:
210
 * Offset of @node for byte-based addressing. 0 if the node does not have an
210
 * Offset of @node for byte-based addressing. 0 if the node does not have an
211
 * object allocated.
211
 * object allocated.
212
 */
212
 */
213
static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
213
static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
214
{
214
{
215
	return ((__u64)node->vm_node.start) << PAGE_SHIFT;
215
	return ((__u64)node->vm_node.start) << PAGE_SHIFT;
216
}
216
}
217
 
217
 
218
/**
218
/**
219
 * drm_vma_node_unmap() - Unmap offset node
219
 * drm_vma_node_unmap() - Unmap offset node
220
 * @node: Offset node
220
 * @node: Offset node
221
 * @file_mapping: Address space to unmap @node from
221
 * @file_mapping: Address space to unmap @node from
222
 *
222
 *
223
 * Unmap all userspace mappings for a given offset node. The mappings must be
223
 * Unmap all userspace mappings for a given offset node. The mappings must be
224
 * associated with the @file_mapping address-space. If no offset exists or
224
 * associated with the @file_mapping address-space. If no offset exists or
225
 * the address-space is invalid, nothing is done.
225
 * the address-space is invalid, nothing is done.
226
 *
226
 *
227
 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
227
 * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
228
 * is not called on this node concurrently.
228
 * is not called on this node concurrently.
229
 */
229
 */
230
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
230
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
231
				      struct address_space *file_mapping)
231
				      struct address_space *file_mapping)
232
{
232
{
233
	if (file_mapping && drm_vma_node_has_offset(node))
233
	if (file_mapping && drm_vma_node_has_offset(node))
234
		unmap_mapping_range(file_mapping,
234
		unmap_mapping_range(file_mapping,
235
				    drm_vma_node_offset_addr(node),
235
				    drm_vma_node_offset_addr(node),
236
				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
236
				    drm_vma_node_size(node) << PAGE_SHIFT, 1);
237
}
237
}
238
 
238
 
239
/**
239
/**
240
 * drm_vma_node_verify_access() - Access verification helper for TTM
240
 * drm_vma_node_verify_access() - Access verification helper for TTM
241
 * @node: Offset node
241
 * @node: Offset node
242
 * @filp: Open-file
242
 * @filp: Open-file
243
 *
243
 *
244
 * This checks whether @filp is granted access to @node. It is the same as
244
 * This checks whether @filp is granted access to @node. It is the same as
245
 * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
245
 * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
246
 * verify_access() callbacks.
246
 * verify_access() callbacks.
247
 *
247
 *
248
 * RETURNS:
248
 * RETURNS:
249
 * 0 if access is granted, -EACCES otherwise.
249
 * 0 if access is granted, -EACCES otherwise.
250
 */
250
 */
251
static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
251
static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
252
					     struct file *filp)
252
					     struct file *filp)
253
{
253
{
254
	return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
254
	return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
255
}
255
}
256
 
256
 
#endif /* __DRM_VMA_MANAGER_H__ */