Subversion Repositories KolibriOS


radeon_object.h: changes between Rev 1126 and Rev 1321

Unchanged context (lines 26-30 in both revisions):

 *          Jerome Glisse
 */
#ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__
Removed in Rev 1321: Rev 1126 carried local stubs of the TTM API inline, with
the real TTM includes commented out.

//#include
//#include
//#include
//#include

/*
 * TTM.
 */
//struct radeon_mman {
//   struct ttm_global_reference mem_global_ref;
//   bool                mem_global_referenced;
//   struct ttm_bo_device        bdev;
//};


#define TTM_PL_SYSTEM           0
#define TTM_PL_TT               1
#define TTM_PL_VRAM             2
#define TTM_PL_PRIV0            3
#define TTM_PL_PRIV1            4
#define TTM_PL_PRIV2            5
#define TTM_PL_PRIV3            6
#define TTM_PL_PRIV4            7
#define TTM_PL_PRIV5            8
#define TTM_PL_SWAPPED          15

#define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM         0x0000FFFF
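
As a usage note (not part of the file): these one-bit-per-type flags combine
into a placement mask, and TTM_PL_MASK_MEM selects the memory-type bits. A
minimal sketch, with a hypothetical helper name:

/* Hypothetical: does this placement mask allow VRAM? */
static inline bool example_placement_allows_vram(uint32_t placement)
{
    return (placement & TTM_PL_MASK_MEM & TTM_PL_FLAG_VRAM) != 0;
}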

struct ttm_mem_type_manager {

    /*
     * No protection. Constant from start.
     */

    bool            has_type;
    bool            use_type;
    uint32_t        flags;
    unsigned long   gpu_offset;
    unsigned long   io_offset;
    unsigned long   io_size;
    void            *io_addr;
    uint64_t        size;
    uint32_t        available_caching;
    uint32_t        default_caching;

    /*
     * Protected by the bdev->lru_lock.
     * TODO: Consider one lru_lock per ttm_mem_type_manager.
     * Plays ill with list removal, though.
     */

    struct drm_mm manager;
    struct list_head lru;
};

struct ttm_bo_driver {
    const uint32_t      *mem_type_prio;
    const uint32_t      *mem_busy_prio;
    uint32_t             num_mem_type_prio;
    uint32_t             num_mem_busy_prio;

    /**
     * struct ttm_bo_driver member create_ttm_backend_entry
     *
     * @bdev: The buffer object device.
     *
     * Create a driver specific struct ttm_backend.
     */

//    struct ttm_backend *(*create_ttm_backend_entry)(struct ttm_bo_device *bdev);

    /**
     * struct ttm_bo_driver member invalidate_caches
     *
     * @bdev: the buffer object device.
     * @flags: new placement of the rebound buffer object.
     *
     * A previously evicted buffer has been rebound in a
     * potentially new location. Tell the driver that it might
     * consider invalidating read (texture) caches on the next command
     * submission as a consequence.
     */

//    int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
//    int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
//                  struct ttm_mem_type_manager *man);

    /**
     * struct ttm_bo_driver member evict_flags:
     *
     * @bo: the buffer object to be evicted
     *
     * Return the bo flags for a buffer which is not mapped to the hardware.
     * These will be placed in proposed_flags so that when the move is
     * finished, they'll end up in bo->mem.flags
     */

//     uint32_t (*evict_flags) (struct ttm_buffer_object *bo);

    /**
     * struct ttm_bo_driver member move:
     *
     * @bo: the buffer to move
     * @evict: whether this motion is evicting the buffer from
     * the graphics address space
     * @interruptible: Use interruptible sleeps if possible when sleeping.
     * @no_wait: whether this should give up and return -EBUSY
     * if this move would require sleeping
     * @new_mem: the new memory region receiving the buffer
     *
     * Move a buffer between two memory regions.
     */

//    int (*move) (struct ttm_buffer_object *bo,
//             bool evict, bool interruptible,
//             bool no_wait, struct ttm_mem_reg *new_mem);

    /**
     * struct ttm_bo_driver member verify_access
     *
     * @bo: Pointer to a buffer object.
     * @filp: Pointer to a struct file trying to access the object.
     *
     * Called from the map / write / read methods to verify that the
     * caller is permitted to access the buffer object.
     * This member may be set to NULL, which will refuse this kind of
     * access for all buffer objects.
     * This function should return 0 if access is granted, -EPERM otherwise.
     */

//    int (*verify_access) (struct ttm_buffer_object *bo,
//                  struct file *filp);

    /**
     * In case a driver writer dislikes the TTM fence objects,
     * the driver writer can replace those with sync objects of
     * his / her own. If it turns out that no driver writer is
     * using these, I suggest we remove these hooks and plug in
     * fences directly. The bo driver needs the following functionality:
     * see the corresponding functions in the fence object API
     * documentation.
     */

//    bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
//    int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
//                  bool lazy, bool interruptible);
//    int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
//    void (*sync_obj_unref) (void **sync_obj);
//    void *(*sync_obj_ref) (void *sync_obj);
};
};
-
 
188
 
-
 
189
#define TTM_NUM_MEM_TYPES 8
-
 
190
 
-
 
191
 
-
 
192
struct ttm_bo_device {
-
 
193
 
-
 
194
    /*
-
 
195
     * Constant after bo device init / atomic.
-
 
196
     */
-
 
197
 
-
 
198
//    struct ttm_mem_global *mem_glob;
-
 
199
    struct ttm_bo_driver *driver;
-
 
200
//    struct page *dummy_read_page;
-
 
201
//    struct ttm_mem_shrink shrink;
-
 
202
 
-
 
203
    size_t      ttm_bo_extra_size;
-
 
204
    size_t      ttm_bo_size;
-
 
205
 
-
 
206
//   rwlock_t vm_lock;
-
 
207
    /*
-
 
208
     * Protected by the vm lock.
-
 
209
     */
-
 
210
    struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
-
 
211
//   struct rb_root addr_space_rb;
-
 
212
    struct drm_mm       addr_space_mm;
-
 
213
 
-
 
214
    /*
-
 
215
     * Might want to change this to one lock per manager.
-
 
216
     */
-
 
217
//   spinlock_t lru_lock;
-
 
218
    /*
-
 
219
     * Protected by the lru lock.
-
 
220
     */
-
 
221
    struct list_head ddestroy;
-
 
222
    struct list_head swap_lru;
-
 
223
 
-
 
224
    /*
-
 
225
     * Protected by load / firstopen / lastclose /unload sync.
-
 
226
     */
-
 
227
 
-
 
228
    bool nice_mode;
-
 
229
//   struct address_space *dev_mapping;
-
 
230
 
-
 
231
    /*
-
 
232
     * Internal protection.
-
 
233
     */
-
 
234
 
-
 
235
//   struct delayed_work wq;
-
 
236
};
-
 
237
 
-
 
238
struct ttm_mem_reg {
-
 
239
    struct drm_mm_node *mm_node;
-
 
240
    unsigned long       size;
-
 
241
    unsigned long       num_pages;
-
 
242
    uint32_t            page_alignment;
-
 
243
    uint32_t            mem_type;
-
 
244
    uint32_t            placement;
-
 
245
};
-
 
246
 
-
 
247
enum ttm_bo_type {
-
 
248
    ttm_bo_type_device,
-
 
249
    ttm_bo_type_user,
-
 
250
    ttm_bo_type_kernel
-
 
251
};
-
 
252
 
-
 
253
struct ttm_buffer_object {

Added in Rev 1321: the header now pulls in the real definitions and provides
thin inline helpers around the TTM buffer object embedded in struct radeon_bo.

#include <drm/radeon_drm.h>
#include "radeon.h"

/**
 * radeon_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type:	ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return RADEON_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return RADEON_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	return 0;
}
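
As a usage note (not part of the file): callers typically feed this helper the
buffer's current TTM memory type, as radeon_bo_wait() below does. A minimal
sketch, with a hypothetical name:

/* Hypothetical: which radeon domain does this bo currently occupy? */
static inline unsigned radeon_bo_current_domain(struct radeon_bo *bo)
{
	return radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
}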

/**
 * radeon_bo_reserve - reserve bo
 * @bo:	bo structure
 * @no_wait:	don't sleep while trying to reserve (return -EBUSY)
 *
 * Returns:
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
	int r;

retry:
	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r == -ERESTART)
			goto retry;
		dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
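
As a usage note (not part of the file): accesses to reservation-protected bo
state are bracketed by this pair. Unlike the upstream contract quoted in the
comment, this port retries -ERESTART internally (the goto above), so with
@no_wait set the only expected failure is -EBUSY. A minimal sketch, with a
hypothetical name:

/* Hypothetical: reserve, touch protected state, unreserve. */
static int radeon_bo_example_access(struct radeon_bo *bo)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	/* ... operate on state guarded by the reservation ... */
	radeon_bo_unreserve(bo);
	return 0;
}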

/**
 * radeon_bo_gpu_offset - return GPU offset of bo
 * @bo:	radeon object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: object should either be pinned or reserved when calling this
 * function, it might be useful to add a check for this for debugging.
 */
static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
{
	return bo->tbo.offset;
}

static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
{
	return !!atomic_read(&bo->tbo.reserved);
}
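
As a usage note (not part of the file): per the comment above, the offset is
only stable while the object is pinned or reserved. A minimal sketch, assuming
the usual convention that radeon_bo_pin() is called with the bo reserved; the
helper name is hypothetical:

/* Hypothetical: pin into VRAM and read back the GPU address.
 * radeon_bo_pin() already reports it via @gpu_addr; re-reading with
 * radeon_bo_gpu_offset() shows the pinned offset stays stable. */
static int radeon_bo_example_pin(struct radeon_bo *bo, u64 *gpu_addr)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
	if (r == 0)
		*gpu_addr = radeon_bo_gpu_offset(bo);
	radeon_bo_unreserve(bo);
	return r;
}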

/**
 * radeon_bo_mmap_offset - return mmap offset of bo
 * @bo:	radeon object for which we query the offset
 *
 * Returns mmap offset of the object.
 *
 * Note: addr_space_offset is constant after ttm bo init thus isn't protected
 * by any lock.
 */
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
	return bo->tbo.addr_space_offset;
}

static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
					bool no_wait)
{
	int r;

retry:
	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r == -ERESTART)
			goto retry;
		dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
		return r;
	}
	spin_lock(&bo->tbo.lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.lock);
	ttm_bo_unreserve(&bo->tbo);
	if (unlikely(r == -ERESTART))
		goto retry;
	return r;
}
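
As a usage note (not part of the file): radeon_bo_wait() reserves the bo,
samples its placement under the bo lock, and waits on any pending fence. A
minimal sketch, with a hypothetical name:

/* Hypothetical: wait until the GPU is idle on this bo and report
 * whether it currently resides in VRAM. */
static int radeon_bo_example_sync(struct radeon_bo *bo, bool *in_vram)
{
	u32 mem_type;
	int r;

	r = radeon_bo_wait(bo, &mem_type, false);
	if (r)
		return r;
	*in_vram = (mem_type == TTM_PL_VRAM);
	return 0;
}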

extern int radeon_bo_create(struct radeon_device *rdev,
				struct drm_gem_object *gobj, unsigned long size,
				bool kernel, u32 domain,
				struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_unpin(struct radeon_bo *bo);
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head);
extern int radeon_bo_list_reserve(struct list_head *head);
extern void radeon_bo_list_unreserve(struct list_head *head);
extern int radeon_bo_list_validate(struct list_head *head, void *fence);
extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
				struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,