/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_OBJECT_H__
#define __RADEON_OBJECT_H__

//#include 
//#include 
//#include 
//#include 

/*
 * TTM.
 */
//struct radeon_mman {
//   struct ttm_global_reference mem_global_ref;
//   bool                mem_global_referenced;
//   struct ttm_bo_device        bdev;
//};

#define TTM_PL_SYSTEM           0
#define TTM_PL_TT               1
#define TTM_PL_VRAM             2
#define TTM_PL_PRIV0            3
#define TTM_PL_PRIV1            4
#define TTM_PL_PRIV2            5
#define TTM_PL_PRIV3            6
#define TTM_PL_PRIV4            7
#define TTM_PL_PRIV5            8
#define TTM_PL_SWAPPED          15

#define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
#define TTM_PL_FLAG_TT          (1 << TTM_PL_TT)
#define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
#define TTM_PL_FLAG_PRIV0       (1 << TTM_PL_PRIV0)
#define TTM_PL_FLAG_PRIV1       (1 << TTM_PL_PRIV1)
#define TTM_PL_FLAG_PRIV2       (1 << TTM_PL_PRIV2)
#define TTM_PL_FLAG_PRIV3       (1 << TTM_PL_PRIV3)
#define TTM_PL_FLAG_PRIV4       (1 << TTM_PL_PRIV4)
#define TTM_PL_FLAG_PRIV5       (1 << TTM_PL_PRIV5)
#define TTM_PL_FLAG_SWAPPED     (1 << TTM_PL_SWAPPED)
#define TTM_PL_MASK_MEM         0x0000FFFF

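/*
 * Illustrative sketch, not part of the original header: every TTM_PL_* memory
 * type owns one bit in a placement mask, so "VRAM or GART" is expressed as
 * (TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT), and TTM_PL_MASK_MEM keeps only the
 * memory-placement bits of such a mask. The helper name below is hypothetical.
 */
#if 0
static inline bool ttm_pl_mask_has_type(uint32_t placement, uint32_t mem_type)
{
    /* mem_type is one of the TTM_PL_* indices defined above */
    return (placement & TTM_PL_MASK_MEM & (1u << mem_type)) != 0;
}
#endif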
struct ttm_mem_type_manager {

    /*
     * No protection. Constant from start.
     */

    bool            has_type;
    bool            use_type;
    uint32_t        flags;
    unsigned long   gpu_offset;
    unsigned long   io_offset;
    unsigned long   io_size;
    void            *io_addr;
    uint64_t        size;
    uint32_t        available_caching;
    uint32_t        default_caching;

    /*
     * Protected by the bdev->lru_lock.
     * TODO: Consider one lru_lock per ttm_mem_type_manager.
     * Plays ill with list removal, though.
     */

    struct drm_mm manager;
    struct list_head lru;
};

struct ttm_bo_driver {
    const uint32_t      *mem_type_prio;
    const uint32_t      *mem_busy_prio;
    uint32_t             num_mem_type_prio;
    uint32_t             num_mem_busy_prio;

    /**
     * struct ttm_bo_driver member create_ttm_backend_entry
     *
     * @bdev: The buffer object device.
     *
     * Create a driver specific struct ttm_backend.
     */

//    struct ttm_backend *(*create_ttm_backend_entry)(struct ttm_bo_device *bdev);

    /**
     * struct ttm_bo_driver member invalidate_caches
     *
     * @bdev: the buffer object device.
     * @flags: new placement of the rebound buffer object.
     *
     * A previously evicted buffer has been rebound in a
     * potentially new location. Tell the driver that it might
     * consider invalidating read (texture) caches on the next command
     * submission as a consequence.
     */

//    int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
//    int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
//                  struct ttm_mem_type_manager *man);
    /**
     * struct ttm_bo_driver member evict_flags:
     *
     * @bo: the buffer object to be evicted
     *
     * Return the bo flags for a buffer which is not mapped to the hardware.
     * These will be placed in proposed_flags so that when the move is
     * finished, they'll end up in bo->mem.flags
     */

//     uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
    /**
     * struct ttm_bo_driver member move:
     *
     * @bo: the buffer to move
     * @evict: whether this motion is evicting the buffer from
     * the graphics address space
     * @interruptible: Use interruptible sleeps if possible when sleeping.
     * @no_wait: whether this should give up and return -EBUSY
     * if this move would require sleeping
     * @new_mem: the new memory region receiving the buffer
     *
     * Move a buffer between two memory regions.
     */
//    int (*move) (struct ttm_buffer_object *bo,
//             bool evict, bool interruptible,
//             bool no_wait, struct ttm_mem_reg *new_mem);

    /**
     * struct ttm_bo_driver member verify_access
     *
     * @bo: Pointer to a buffer object.
     * @filp: Pointer to a struct file trying to access the object.
     *
     * Called from the map / write / read methods to verify that the
     * caller is permitted to access the buffer object.
     * This member may be set to NULL, which will refuse this kind of
     * access for all buffer objects.
     * This function should return 0 if access is granted, -EPERM otherwise.
     */
//    int (*verify_access) (struct ttm_buffer_object *bo,
//                  struct file *filp);

    /**
     * In case a driver writer dislikes the TTM fence objects,
     * the driver writer can replace those with sync objects of
     * his / her own. If it turns out that no driver writer is
     * using these, I suggest we remove these hooks and plug in
     * fences directly. The bo driver needs the following functionality:
     * See the corresponding functions in the fence object API
     * documentation.
     */

//    bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
//    int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
//                  bool lazy, bool interruptible);
//    int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
//    void (*sync_obj_unref) (void **sync_obj);
//    void *(*sync_obj_ref) (void *sync_obj);
};

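/*
 * Illustrative sketch, not part of the original header: the priority tables in
 * struct ttm_bo_driver tell TTM in which order to try memory types when placing
 * a buffer and which types to fall back to when the preferred ones are busy.
 * The names below are hypothetical and only show the shape of such a table.
 */
#if 0
static const uint32_t example_mem_prio[]  = { TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };
static const uint32_t example_busy_prio[] = { TTM_PL_TT, TTM_PL_SYSTEM };

static struct ttm_bo_driver example_bo_driver = {
    .mem_type_prio     = example_mem_prio,
    .mem_busy_prio     = example_busy_prio,
    .num_mem_type_prio = sizeof(example_mem_prio) / sizeof(example_mem_prio[0]),
    .num_mem_busy_prio = sizeof(example_busy_prio) / sizeof(example_busy_prio[0]),
};
#endif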
#define TTM_NUM_MEM_TYPES 8


struct ttm_bo_device {

    /*
     * Constant after bo device init / atomic.
     */

//    struct ttm_mem_global *mem_glob;
    struct ttm_bo_driver *driver;
//    struct page *dummy_read_page;
//    struct ttm_mem_shrink shrink;

    size_t      ttm_bo_extra_size;
    size_t      ttm_bo_size;

//   rwlock_t vm_lock;
    /*
     * Protected by the vm lock.
     */
    struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
//   struct rb_root addr_space_rb;
    struct drm_mm       addr_space_mm;

    /*
     * Might want to change this to one lock per manager.
     */
//   spinlock_t lru_lock;
    /*
     * Protected by the lru lock.
     */
    struct list_head ddestroy;
    struct list_head swap_lru;

    /*
     * Protected by load / firstopen / lastclose /unload sync.
     */

    bool nice_mode;
//   struct address_space *dev_mapping;

    /*
     * Internal protection.
     */

//   struct delayed_work wq;
};

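/*
 * Illustrative sketch, not part of the original header: per memory type state
 * lives in bdev->man[], indexed by the TTM_PL_* value, so looking up the
 * manager for a placement is a plain array access. The helper name is
 * hypothetical.
 */
#if 0
static inline struct ttm_mem_type_manager *
example_mem_type_manager(struct ttm_bo_device *bdev, uint32_t mem_type)
{
    /* caller guarantees mem_type < TTM_NUM_MEM_TYPES */
    return &bdev->man[mem_type];
}
#endif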
struct ttm_mem_reg {
    struct drm_mm_node *mm_node;
    unsigned long       size;
    unsigned long       num_pages;
    uint32_t            page_alignment;
    uint32_t            mem_type;
    uint32_t            placement;
};

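/*
 * Illustrative sketch, not part of the original header: mem_type in struct
 * ttm_mem_reg holds one of the TTM_PL_* values defined above, so checking the
 * current placement of a region is a simple comparison. The helper name is
 * hypothetical.
 */
#if 0
static inline bool example_mem_is_vram(const struct ttm_mem_reg *mem)
{
    return mem->mem_type == TTM_PL_VRAM;
}
#endif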
enum ttm_bo_type {
    ttm_bo_type_device,
    ttm_bo_type_user,
    ttm_bo_type_kernel
};

struct ttm_buffer_object {
    /**
     * Members constant at init.
     */

    struct ttm_bo_device   *bdev;
    unsigned long           buffer_start;
    enum ttm_bo_type        type;
    void (*destroy) (struct ttm_buffer_object *);
    unsigned long           num_pages;
    uint64_t                addr_space_offset;
    size_t                  acc_size;

    /**
     * Members not needing protection.
     */

//    struct kref kref;
//    struct kref list_kref;
//    wait_queue_head_t event_queue;
//    spinlock_t lock;

    /**
     * Members protected by the bo::reserved lock.
     */

    uint32_t                proposed_placement;
    struct ttm_mem_reg      mem;
//    struct file *persistant_swap_storage;
//    struct ttm_tt *ttm;
    bool evicted;

    /**
     * Members protected by the bo::reserved lock only when written to.
     */

//    atomic_t cpu_writers;

    /**
     * Members protected by the bdev::lru_lock.
     */

    struct list_head lru;
    struct list_head ddestroy;
    struct list_head swap;
    uint32_t val_seq;
    bool seq_valid;

    /**
     * Members protected by the bdev::lru_lock
     * only when written to.
     */

//    atomic_t reserved;


    /**
     * Members protected by the bo::lock
     */

    void *sync_obj_arg;
    void *sync_obj;
    unsigned long priv_flags;

    /**
     * Members protected by the bdev::vm_lock
     */

//    struct rb_node vm_rb;
    struct drm_mm_node *vm_node;


    /**
     * Special members that are protected by the reserve lock
     * and the bo::lock when written to. Can be read with
     * either of these locks held.
     */

    unsigned long offset;
    uint32_t cur_placement;
};

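/*
 * Illustrative sketch, not part of the original header: bo->mem records the
 * buffer object's current placement, and the defines above map a memory type
 * to its flag bit with a single shift. The helper name is hypothetical.
 */
#if 0
static inline uint32_t example_bo_cur_flag(const struct ttm_buffer_object *bo)
{
    return 1u << bo->mem.mem_type;
}
#endif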
struct radeon_object
{
    struct ttm_buffer_object     tobj;
    struct list_head            list;
    struct radeon_device        *rdev;
    struct drm_gem_object       *gobj;
//   struct ttm_bo_kmap_obj      kmap;

    unsigned            pin_count;
    uint64_t            gpu_addr;
    void                *kptr;
    bool                is_iomem;

    struct drm_mm_node  *mm_node;
    u32_t                vm_addr;
    u32_t                cpu_addr;
    u32_t                flags;
};

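/*
 * Illustrative sketch, not part of the original header: gpu_addr in struct
 * radeon_object is only expected to be valid while the object is pinned
 * (pin_count > 0), so a hypothetical accessor would check that first.
 */
#if 0
static inline uint64_t example_radeon_object_gpu_addr(struct radeon_object *robj)
{
    return robj->pin_count ? robj->gpu_addr : 0;
}
#endif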
#endif