Subversion Repositories: KolibriOS


Diff of ttm_bo_driver.h between Rev 2003 and Rev 3262, rendered below as a unified diff: '-' lines belong to Rev 2003, '+' lines to Rev 3262. The changes track the upstream TTM rework of this header: struct ttm_backend is folded into struct ttm_tt (bind/unbind/destroy now take the ttm_tt directly), struct ttm_dma_tt and driver-level populate/unpopulate hooks are added, the sync_obj_arg parameter is dropped from the fence callbacks, the no_wait parameters become no_wait_gpu, and ttm_global_reference becomes drm_global_reference. Where the capture swallowed an angle-bracketed #include target that cannot be recovered from context, it appears as <...>.
@@ -28 +28 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 #ifndef _TTM_BO_DRIVER_H_
 #define _TTM_BO_DRIVER_H_
 
-#include "ttm/ttm_bo_api.h"
-#include "ttm/ttm_memory.h"
-#include "ttm/ttm_module.h"
-#include "drm_mm.h"
-#include "linux/spinlock.h"
-
-struct ttm_backend;
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_memory.h>
+#include <ttm/ttm_module.h>
+#include <...>
+#include <...>
+//#include <...>
+//#include <...>
+#include <...>
 
 struct ttm_backend_func {
-	/**
-	 * struct ttm_backend_func member populate
-	 *
-	 * @backend: Pointer to a struct ttm_backend.
-	 * @num_pages: Number of pages to populate.
-	 * @pages: Array of pointers to ttm pages.
-	 * @dummy_read_page: Page to be used instead of NULL pages in the
-	 * array @pages.
-	 *
-	 * Populate the backend with ttm pages. Depending on the backend,
-	 * it may or may not copy the @pages array.
-	 */
-	int (*populate) (struct ttm_backend *backend,
-			 unsigned long num_pages, struct page **pages,
-			 struct page *dummy_read_page,
-			 dma_addr_t *dma_addrs);
-	/**
-	 * struct ttm_backend_func member clear
-	 *
-	 * @backend: Pointer to a struct ttm_backend.
-	 *
-	 * This is an "unpopulate" function. Release all resources
-	 * allocated with populate.
-	 */
-	void (*clear) (struct ttm_backend *backend);
-
 	/**
 	 * struct ttm_backend_func member bind
 	 *
-	 * @backend: Pointer to a struct ttm_backend.
+	 * @ttm: Pointer to a struct ttm_tt.
 	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
 	 * memory type and location for binding.
 	 *
 	 * Bind the backend pages into the aperture in the location
 	 * indicated by @bo_mem. This function should be able to handle
-	 * differences between aperture- and system page sizes.
+	 * differences between aperture and system page sizes.
 	 */
-	int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
+	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 
 	/**
 	 * struct ttm_backend_func member unbind
 	 *
-	 * @backend: Pointer to a struct ttm_backend.
+	 * @ttm: Pointer to a struct ttm_tt.
 	 *
 	 * Unbind previously bound backend pages. This function should be
-	 * able to handle differences between aperture- and system page sizes.
+	 * able to handle differences between aperture and system page sizes.
 	 */
-	int (*unbind) (struct ttm_backend *backend);
+	int (*unbind) (struct ttm_tt *ttm);
 
 	/**
 	 * struct ttm_backend_func member destroy
 	 *
-	 * @backend: Pointer to a struct ttm_backend.
+	 * @ttm: Pointer to a struct ttm_tt.
 	 *
-	 * Destroy the backend.
+	 * Destroy the backend. This will be called back from ttm_tt_destroy,
+	 * so don't call ttm_tt_destroy from the callback (infinite loop).
 	 */
-	void (*destroy) (struct ttm_backend *backend);
+	void (*destroy) (struct ttm_tt *ttm);
 };
 
-/**
- * struct ttm_backend
- *
- * @bdev: Pointer to a struct ttm_bo_device.
- * @flags: For driver use.
- * @func: Pointer to a struct ttm_backend_func that describes
- * the backend methods.
- *
- */
-
-struct ttm_backend {
-	struct ttm_bo_device *bdev;
-	uint32_t flags;
-	struct ttm_backend_func *func;
-};
-
-#define TTM_PAGE_FLAG_USER            (1 << 1)
-#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
 #define TTM_PAGE_FLAG_WRITE           (1 << 3)
 #define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
 #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
 #define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
 #define TTM_PAGE_FLAG_DMA32           (1 << 7)
+#define TTM_PAGE_FLAG_SG              (1 << 8)
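
In Rev 3262 the populate/clear pair is gone from the backend and bind/unbind/destroy operate on the ttm_tt itself. A minimal sketch of what a driver-side function table now looks like; all my_* names are hypothetical, not from the repository, and the sketch assumes the usual kernel headers plus ttm_tt_fini() declared further down in this file:

struct my_tt {
	struct ttm_tt ttm;	/* base object; its func points at the table below */
	/* driver-private GART bookkeeping would live here */
};

static int my_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct my_tt *gtt = container_of(ttm, struct my_tt, ttm);

	/* program aperture entries for ttm->pages[0..ttm->num_pages)
	 * starting at page offset bo_mem->start */
	(void)gtt;
	return 0;
}

static int my_ttm_unbind(struct ttm_tt *ttm)
{
	/* clear the aperture entries programmed by my_ttm_bind() */
	return 0;
}

static void my_ttm_destroy(struct ttm_tt *ttm)
{
	struct my_tt *gtt = container_of(ttm, struct my_tt, ttm);

	/* called from ttm_tt_destroy(): free our memory, but never call
	 * ttm_tt_destroy() from here, that would recurse */
	ttm_tt_fini(ttm);
	kfree(gtt);
}

static struct ttm_backend_func my_backend_func = {
	.bind    = my_ttm_bind,
	.unbind  = my_ttm_unbind,
	.destroy = my_ttm_destroy,
};
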
@@ -124 +83 @@
 
 enum ttm_caching_state {
 	tt_uncached,
 	tt_wc,
 	tt_cached
 };
 
 /**
  * struct ttm_tt
  *
+ * @bdev: Pointer to a struct ttm_bo_device.
+ * @func: Pointer to a struct ttm_backend_func that describes
+ * the backend methods.
  * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
  * pointer.
  * @pages: Array of pages backing the data.
- * @first_himem_page: Himem pages are put last in the page array, which
- * enables us to run caching attribute changes on only the first part
- * of the page array containing lomem pages. This is the index of the
- * first himem page.
- * @last_lomem_page: Index of the last lomem page in the page array.
  * @num_pages: Number of pages in the page array.
  * @bdev: Pointer to the current struct ttm_bo_device.
  * @be: Pointer to the ttm backend.
- * @tsk: The task for user ttm.
- * @start: virtual address for user ttm.
  * @swap_storage: Pointer to shmem struct file for swap storage.
  * @caching_state: The current caching state of the pages.
  * @state: The current binding state of the pages.
- * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
  *
  * This is a structure holding the pages, caching- and aperture binding
  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
  * memory.
  */
 
 struct ttm_tt {
+	struct ttm_bo_device *bdev;
+	struct ttm_backend_func *func;
 	struct page *dummy_read_page;
 	struct page **pages;
-	long first_himem_page;
-	long last_lomem_page;
 	uint32_t page_flags;
 	unsigned long num_pages;
+	struct sg_table *sg; /* for SG objects via dma-buf */
 	struct ttm_bo_global *glob;
-	struct ttm_backend *be;
-	struct task_struct *tsk;
-	unsigned long start;
 	struct file *swap_storage;
 	enum ttm_caching_state caching_state;
 	enum {
 		tt_bound,
 		tt_unbound,
 		tt_unpopulated,
 	} state;
-	dma_addr_t *dma_address;
 };
+
+/**
+ * struct ttm_dma_tt
+ *
+ * @ttm: Base ttm_tt struct.
+ * @dma_address: The DMA (bus) addresses of the pages
+ * @pages_list: used by some page allocation backend
+ *
+ * This is a structure holding the pages, caching- and aperture binding
+ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * memory.
+ */
+struct ttm_dma_tt {
+	struct ttm_tt ttm;
+	dma_addr_t *dma_address;
+	struct list_head pages_list;
+};
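
ttm_dma_tt wraps the base ttm_tt together with the dma_address array that used to live in ttm_tt itself. A driver that needs bus addresses embeds it and hands TTM only the base struct; a sketch with hypothetical my_* names, using ttm_dma_tt_init() declared later in this header:

struct my_dma_tt {
	struct ttm_dma_tt dma;	/* dma.ttm is the base struct ttm_tt */
};

static struct ttm_tt *my_ttm_tt_create(struct ttm_bo_device *bdev,
				       unsigned long size, uint32_t page_flags,
				       struct page *dummy_read_page)
{
	struct my_dma_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (tt == NULL)
		return NULL;
	tt->dma.ttm.func = &my_backend_func;	/* table from the sketch above */
	if (ttm_dma_tt_init(&tt->dma, bdev, size, page_flags,
			    dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return &tt->dma.ttm;			/* TTM only sees the base struct */
}
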
@@ -177 +145 @@
+
+#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
+#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
+#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */
+
+struct ttm_mem_type_manager;
+
+struct ttm_mem_type_manager_func {
+	/**
+	 * struct ttm_mem_type_manager member init
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @p_size: Implementation dependent, but typically the size of the
+	 * range to be managed in pages.
+	 *
+	 * Called to initialize a private range manager. The function is
+	 * expected to initialize the man::priv member.
+	 * Returns 0 on success, negative error code on failure.
+	 */
+	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+
+	/**
+	 * struct ttm_mem_type_manager member takedown
+	 *
+	 * @man: Pointer to a memory type manager.
+	 *
+	 * Called to undo the setup done in init. All allocated resources
+	 * should be freed.
+	 */
+	int  (*takedown)(struct ttm_mem_type_manager *man);
+
+	/**
+	 * struct ttm_mem_type_manager member get_node
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @bo: Pointer to the buffer object we're allocating space for.
+	 * @placement: Placement details.
+	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+	 *
+	 * This function should allocate space in the memory type managed
+	 * by @man. Placement details if
+	 * applicable are given by @placement. If successful,
+	 * @mem::mm_node should be set to a non-null value, and
+	 * @mem::start should be set to a value identifying the beginning
+	 * of the range allocated, and the function should return zero.
+	 * If the memory region cannot accommodate the buffer object,
+	 * @mem::mm_node should be set to NULL, and the function should
+	 * return 0.
+	 * If a system error occurred, preventing the request to be fulfilled,
+	 * the function should return a negative error code.
+	 *
+	 * Note that @mem::mm_node will only be dereferenced by
+	 * struct ttm_mem_type_manager functions and optionally by the driver,
+	 * which has knowledge of the underlying type.
+	 *
+	 * This function may not be called from within atomic context, so
+	 * an implementation can and must use either a mutex or a spinlock to
+	 * protect any data structures managing the space.
+	 */
+	int  (*get_node)(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem);
+
+	/**
+	 * struct ttm_mem_type_manager member put_node
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+	 *
+	 * This function frees memory type resources previously allocated
+	 * and that are identified by @mem::mm_node and @mem::start. May not
+	 * be called from within atomic context.
+	 */
+	void (*put_node)(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem);
+
+	/**
+	 * struct ttm_mem_type_manager member debug
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @prefix: Prefix to be used in printout to identify the caller.
+	 *
+	 * This function is called to print out the state of the memory
+	 * type manager to aid debugging of out-of-memory conditions.
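
The get_node contract is easy to get wrong: running out of space is signalled by mm_node == NULL together with a 0 return, not by an errno. A deliberately trivial bump-allocator sketch of that contract follows (hypothetical my_* names; it never reclaims, a real manager frees ranges in put_node or simply plugs in the generic ttm_bo_manager_func exported near the end of this header; assumes the mm_node/start/num_pages fields of struct ttm_mem_reg from ttm_bo_api.h):

static DEFINE_SPINLOCK(my_mm_lock);
static unsigned long my_next_page;
static unsigned long my_total_pages;

static int my_get_node(struct ttm_mem_type_manager *man,
		       struct ttm_buffer_object *bo,
		       struct ttm_placement *placement,
		       struct ttm_mem_reg *mem)
{
	spin_lock(&my_mm_lock);		/* get_node may sleep, but must lock */
	if (my_next_page + mem->num_pages > my_total_pages) {
		spin_unlock(&my_mm_lock);
		mem->mm_node = NULL;	/* out of space: NOT an error */
		return 0;
	}
	mem->start = my_next_page;	/* first page of the allocated range */
	my_next_page += mem->num_pages;
	spin_unlock(&my_mm_lock);
	mem->mm_node = (void *)1;	/* any non-NULL cookie marks success */
	return 0;
}

static void my_put_node(struct ttm_mem_type_manager *man,
			struct ttm_mem_reg *mem)
{
	mem->mm_node = NULL;		/* a real manager frees the range here */
}
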
@@ -261 +313 @@
  * @sync_obj_ref: See ttm_fence_api.h
  */
 
 struct ttm_bo_driver {
 	/**
-	 * struct ttm_bo_driver member create_ttm_backend_entry
+	 * ttm_tt_create
 	 *
-	 * @bdev: The buffer object device.
+	 * @bdev: pointer to a struct ttm_bo_device:
+	 * @size: Size of the data needed backing.
+	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+	 * @dummy_read_page: See struct ttm_bo_device.
 	 *
-	 * Create a driver specific struct ttm_backend.
+	 * Create a struct ttm_tt to back data with system memory pages.
+	 * No pages are actually allocated.
+	 * Returns:
+	 * NULL: Out of memory.
 	 */
-
-	struct ttm_backend *(*create_ttm_backend_entry)
-	 (struct ttm_bo_device *bdev);
+	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
+					unsigned long size,
+					uint32_t page_flags,
+					struct page *dummy_read_page);
+
+	/**
+	 * ttm_tt_populate
+	 *
+	 * @ttm: The struct ttm_tt to contain the backing pages.
+	 *
+	 * Allocate all backing pages
+	 * Returns:
+	 * -ENOMEM: Out of memory.
+	 */
+	int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+	/**
+	 * ttm_tt_unpopulate
+	 *
+	 * @ttm: The struct ttm_tt to contain the backing pages.
+	 *
+	 * Free all backing pages.
+	 */
+	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
 
 	/**
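
With populate moved from the old backend into ttm_bo_driver, a bare-bones page-by-page implementation looks like the sketch below (illustration only; real drivers typically batch through the TTM page pool, and the my_* names are hypothetical):

static int my_ttm_tt_populate(struct ttm_tt *ttm)
{
	unsigned long i;

	if (ttm->state != tt_unpopulated)
		return 0;			/* already has pages */

	for (i = 0; i < ttm->num_pages; ++i) {
		ttm->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (ttm->pages[i] == NULL) {
			while (i--) {		/* roll back on failure */
				__free_page(ttm->pages[i]);
				ttm->pages[i] = NULL;
			}
			return -ENOMEM;
		}
	}
	ttm->state = tt_unbound;		/* populated, not yet bound */
	return 0;
}

static void my_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	unsigned long i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			__free_page(ttm->pages[i]);
			ttm->pages[i] = NULL;
		}
	}
	ttm->state = tt_unpopulated;
}
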
@@ -313 +392 @@
 	 *
 	 * Move a buffer between two memory regions.
 	 */
 	int (*move) (struct ttm_buffer_object *bo,
 		     bool evict, bool interruptible,
-		     bool no_wait, struct ttm_mem_reg *new_mem);
+		     bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem);
 
 	/**
 	 * struct ttm_bo_driver_member verify_access
 	 *
@@ -340 +420 @@
 	 * fences directly. The bo driver needs the following functionality:
 	 * See the corresponding functions in the fence object API
 	 * documentation.
 	 */
 
-	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
-	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
+	bool (*sync_obj_signaled) (void *sync_obj);
+	int (*sync_obj_wait) (void *sync_obj,
 			      bool lazy, bool interruptible);
-	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
+	int (*sync_obj_flush) (void *sync_obj);
 	void (*sync_obj_unref) (void **sync_obj);
 	void *(*sync_obj_ref) (void *sync_obj);
 
 	/* hook to notify driver about a driver move so it
 	 * can do tiling things */
 	void (*move_notify)(struct ttm_buffer_object *bo,
 			    struct ttm_mem_reg *new_mem);
 	/* notify the driver we are taking a fault on this BO
 	 * and have reserved it */
 	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+
+	/**
+	 * notify the driver that we're about to swap out this bo
+	 */
+	void (*swap_notify) (struct ttm_buffer_object *bo);
+
+	/**
+	 * Driver callback on when mapping io memory (for bo_move_memcpy
+	 * for instance). TTM will take care to call io_mem_free whenever
+	 * the mapping is not used anymore. io_mem_reserve & io_mem_free
+	 * are balanced.
+	 */
+	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
+	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
 };
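
Tying the pieces together, the driver's ttm_bo_driver instance now carries the ttm_tt hooks where create_ttm_backend_entry used to be. A partial sketch using only callbacks shown in this diff (my_* names hypothetical, create/populate/unpopulate from the sketches above; the io_mem stubs are the trivial case of pure system-memory placements):

static int my_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	return 0;	/* nothing to map for system-memory placements */
}

static void my_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static struct ttm_bo_driver my_bo_driver = {
	.ttm_tt_create     = my_ttm_tt_create,
	.ttm_tt_populate   = my_ttm_tt_populate,
	.ttm_tt_unpopulate = my_ttm_tt_unpopulate,
	.io_mem_reserve    = my_io_mem_reserve,
	.io_mem_free       = my_io_mem_free,
};
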
 
@@ -361 +455 @@
 /**
  * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
  */
 
 struct ttm_bo_global_ref {
-	struct ttm_global_reference ref;
+	struct drm_global_reference ref;
 	struct ttm_mem_global *mem_glob;
 };
 
 /**
  * struct ttm_bo_global - Buffer object driver global data.
  *
  * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
  * @dummy_read_page: Pointer to a dummy page used for mapping requests
  * of unpopulated pages.
  * @shrink: A shrink callback object used for buffer object swap.
- * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
- * used by a buffer object. This is excluding page arrays and backing pages.
- * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
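
ttm_bo_global_init/release are not called directly; they are registered as the init/release hooks of a drm_global_reference, the drm_global.h API that replaces ttm_global_reference here. A sketch of the usual setup, assuming drm_global_item_ref() and DRM_GLOBAL_TTM_BO from drm_global.h (not shown in this capture):

static struct ttm_bo_global_ref my_bo_global_ref;

static int my_bo_global_setup(struct ttm_mem_global *mem_glob)
{
	struct drm_global_reference *ref = &my_bo_global_ref.ref;

	my_bo_global_ref.mem_glob = mem_glob;
	ref->global_type = DRM_GLOBAL_TTM_BO;
	ref->size = sizeof(struct ttm_bo_global);
	ref->init = &ttm_bo_global_init;
	ref->release = &ttm_bo_global_release;
	return drm_global_item_ref(ref);	/* calls ttm_bo_global_init(ref) */
}
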
@@ -392 +483 @@
 
 //	struct kobject kobj;
 	struct ttm_mem_global *mem_glob;
 	struct page *dummy_read_page;
 	struct ttm_mem_shrink shrink;
-	size_t ttm_bo_extra_size;
-	size_t ttm_bo_size;
 	struct mutex device_list_mutex;
 	spinlock_t lru_lock;
 
@@ -424 +513 @@
 /**
  * struct ttm_bo_device - Buffer object driver device-specific data.
  *
  * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
  * @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
  * @addr_space_mm: Range manager for the device address space.
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
- * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
- * If a GPU lockup has been detected, this is forced to 0.
+ * @val_seq: Current validation sequence.
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
  *
  */
@@ -445 +535 @@
 	struct list_head device_list;
 	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
 	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+	spinlock_t fence_lock;
 	/*
 	 * Protected by the vm lock.
 	 */
-//   struct rb_root addr_space_rb;
+	struct rb_root addr_space_rb;
 	struct drm_mm addr_space_mm;
 
 	/*
 	 * Protected by the global:lru lock.
 	 */
 	struct list_head ddestroy;
+	uint32_t val_seq;
 
 	/*
 	 * Protected by load / firstopen / lastclose /unload sync.
 	 */
 
-	bool nice_mode;
 	struct address_space *dev_mapping;
 
 	/*
 	 * Internal protection.
 	 */
@@ -490 +581 @@
 	*old ^= (*old ^ new) & mask;
 	return *old;
 }
 
 /**
- * ttm_tt_create
+ * ttm_tt_init
  *
+ * @ttm: The struct ttm_tt.
  * @bdev: pointer to a struct ttm_bo_device:
  * @size: Size of the data needed backing.
  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
  * @dummy_read_page: See struct ttm_bo_device.
  *
  * Create a struct ttm_tt to back data with system memory pages.
  * No pages are actually allocated.
  * Returns:
  * NULL: Out of memory.
  */
-extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
-				    unsigned long size,
-				    uint32_t page_flags,
-				    struct page *dummy_read_page);
+extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+			unsigned long size, uint32_t page_flags,
+			struct page *dummy_read_page);
+extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+			   unsigned long size, uint32_t page_flags,
+			   struct page *dummy_read_page);
 
 /**
- * ttm_tt_set_user:
- *
- * @ttm: The struct ttm_tt to populate.
- * @tsk: A struct task_struct for which @start is a valid user-space address.
- * @start: A valid user-space address.
- * @num_pages: Size in pages of the user memory area.
- *
- * Populate a struct ttm_tt with a user-space memory area after first pinning
- * the pages backing it.
- * Returns:
- * !0: Error.
- */
-
-extern int ttm_tt_set_user(struct ttm_tt *ttm,
-			   struct task_struct *tsk,
-			   unsigned long start, unsigned long num_pages);
+ * ttm_tt_fini
+ *
+ * @ttm: the ttm_tt structure.
+ *
+ * Free memory of ttm_tt structure
+ */
+extern void ttm_tt_fini(struct ttm_tt *ttm);
+extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 
 /**
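
For a backend that does not need DMA addresses, the create hook pairs ttm_tt_init() on the way in with ttm_tt_fini() in the destroy callback. A sketch with hypothetical my_* names:

static struct ttm_tt *my_simple_tt_create(struct ttm_bo_device *bdev,
					  unsigned long size,
					  uint32_t page_flags,
					  struct page *dummy_read_page)
{
	struct ttm_tt *ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);

	if (ttm == NULL)
		return NULL;
	ttm->func = &my_backend_func;	/* its destroy() does ttm_tt_fini + kfree */
	if (ttm_tt_init(ttm, bdev, size, page_flags, dummy_read_page) != 0) {
		kfree(ttm);
		return NULL;
	}
	return ttm;
}
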
@@ -536 +622 @@
  * Bind the pages of @ttm to an aperture location identified by @bo_mem
  */
 extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 
-/**
- * ttm_tt_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-extern int ttm_tt_populate(struct ttm_tt *ttm);
-
 /**
  * ttm_ttm_destroy:
  *
  * @ttm: The struct ttm_tt.
  *
- * Unbind, unpopulate and destroy a struct ttm_tt.
+ * Unbind, unpopulate and destroy common struct ttm_tt.
  */
 extern void ttm_tt_destroy(struct ttm_tt *ttm);
 
@@ -563 +640 @@
  * Unbind a struct ttm_tt.
  */
 extern void ttm_tt_unbind(struct ttm_tt *ttm);
 
 /**
- * ttm_ttm_destroy:
+ * ttm_tt_swapin:
  *
  * @ttm: The struct ttm_tt.
- * @index: Index of the desired page.
- *
- * Return a pointer to the struct page backing @ttm at page
- * index @index. If the page is unpopulated, one will be allocated to
- * populate that index.
  *
- * Returns:
- * NULL on OOM.
+ * Swap in a previously swapped-out ttm_tt.
  */
-extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
 
 /**
  * ttm_tt_cache_flush:
@@ -604 +675 @@
  * hit RAM. This function may be very costly as it involves global TLB
  * and cache flushes and potential page splitting / combining.
  */
 extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
 extern int ttm_tt_swapout(struct ttm_tt *ttm,
-			  struct file *persistant_swap_storage);
+			  struct file *persistent_swap_storage);
 
 /*
  * ttm_bo.c
@@ -630 +701 @@
  * @bo: Pointer to a struct ttm_buffer_object. the data of which
  * we want to allocate space for.
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptibly while waiting.
- * @no_wait: Don't sleep waiting for space to become available.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
  * the placement flags in @mem, potentially evicting other idle buffer objects.
  * This function may sleep while waiting for space to become available.
  * Returns:
@@ -644 +715 @@
  * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
  */
 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
 				struct ttm_mem_reg *mem,
-				bool interruptible, bool no_wait);
-/**
- * ttm_bo_wait_for_cpu
- *
- * @bo: Pointer to a struct ttm_buffer_object.
- * @no_wait: Don't sleep while waiting.
- *
- * Wait until a buffer object is no longer sync'ed for CPU access.
- * Returns:
- * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-
-extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-
-/**
- * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
- *
- * @bo Pointer to a struct ttm_buffer_object.
- * @bus_base On return the base of the PCI region
- * @bus_offset On return the byte offset into the PCI region
- * @bus_size On return the byte size of the buffer object or zero if
- * the buffer object memory is not accessible through a PCI region.
- *
- * Returns:
- * -EINVAL if the buffer object is currently not mappable.
- * 0 otherwise.
- */
-
-extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-			     struct ttm_mem_reg *mem,
-			     unsigned long *bus_base,
-			     unsigned long *bus_offset,
-			     unsigned long *bus_size);
+				bool interruptible,
+				bool no_wait_gpu);
 
+extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem);
+extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
+				  struct ttm_mem_reg *mem);
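
ttm_bo_mem_put() is the new symmetric release for space obtained from ttm_bo_mem_space(), replacing the removed ttm_bo_wait_cpu()/ttm_bo_pci_offset() era helpers on the error path. A sketch; my_bind_gart() is a hypothetical driver step:

static int my_alloc_and_bind(struct ttm_buffer_object *bo,
			     struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int ret;

	ret = ttm_bo_mem_space(bo, placement, mem,
			       true  /* interruptible */,
			       false /* no_wait_gpu */);
	if (ret != 0)
		return ret;			/* no space, or interrupted */

	ret = my_bind_gart(bo, mem);		/* hypothetical driver step */
	if (ret != 0)
		ttm_bo_mem_put(bo, mem);	/* give the range back on failure */
	return ret;
}
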
 
@@ -684 +728 @@
-extern void ttm_bo_global_release(struct ttm_global_reference *ref);
-extern int ttm_bo_global_init(struct ttm_global_reference *ref);
+extern void ttm_bo_global_release(struct drm_global_reference *ref);
+extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
 extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
 
 /**
  * ttm_bo_device_init
  *
  * @bdev: A pointer to a struct ttm_bo_device to initialize.
- * @mem_global: A pointer to an initialized struct ttm_mem_global.
+ * @glob: A pointer to an initialized struct ttm_bo_global.
  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
@@ -711 +755 @@
  * @bo: tear down the virtual mappings for this BO
  */
 extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+			   bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
 /**
  * ttm_bo_reserve:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -727 +787 @@
  *
  * Deadlocks may occur when two processes try to reserve multiple buffers in
  * different order, either by will or as a result of a buffer being evicted
  * to make room for a buffer already reserved. (Buffers are reserved before
  * they are evicted). The following algorithm prevents such deadlocks from
- * occuring:
+ * occurring:
  * 1) Buffers are reserved with the lru spinlock held. Upon successful
  * reservation they are removed from the lru list. This stops a reserved buffer
  * from being evicted. However the lru spinlock is released between the time
  * a buffer is selected for eviction and the time it is reserved.
  * Therefore a check is made when a buffer is reserved for eviction, that it
@@ -760 +820 @@
  * -EAGAIN: The reservation may cause a deadlock.
  * Release all buffer reservations, wait for @bo to become unreserved and
  * try again. (only if use_sequence == 1).
  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
  * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
  */
 extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence);
 
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, Only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+				 bool interruptible,
+				 bool no_wait, bool use_sequence,
+				 uint32_t sequence);
 
 /**
  * ttm_bo_unreserve
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  *
  * Unreserve a previous reservation of @bo.
  */
 extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_wait_unreserved
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Wait for a struct ttm_buffer_object to become unreserved.
- * This is typically used in the execbuf code to relax cpu-usage when
- * a potential deadlock condition backoff.
- */
-extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-				  bool interruptible);
-
-/**
- * ttm_bo_block_reservation
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Use interruptible sleep when waiting.
- * @no_wait: Don't sleep, but rather return -EBUSY.
- *
- * Block reservation for validation by simply reserving the buffer.
- * This is intended for single buffer use only without eviction,
- * and thus needs no deadlock protection.
- *
- * Returns:
- * -EBUSY: If no_wait == 1 and the buffer is already reserved.
- * -ERESTARTSYS: If interruptible == 1 and the process received a signal
- * while sleeping.
- */
-extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
-				    bool interruptible, bool no_wait);
-
-/**
- * ttm_bo_unblock_reservation
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Unblocks reservation leaving lru lists untouched.
- */
-extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
+/**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_wait_unreserved
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Wait for a struct ttm_buffer_object to become unreserved.
+ * This is typically used in the execbuf code to relax cpu-usage when
+ * a potential deadlock condition backoff.
+ */
+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+				  bool interruptible);
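
The reservation API itself is unchanged apart from the extra documented return codes and the new _locked variants, so the basic usage pattern stays the same. A sketch using only functions declared here:

static int my_touch_bo(struct ttm_buffer_object *bo)
{
	int ret;

	/* no_wait = false, use_sequence = false, sequence = 0 */
	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (ret != 0)
		return ret;	/* e.g. -ERESTARTSYS, -EBUSY, -EAGAIN, -EDEADLK */

	/* while reserved the bo cannot be evicted or reserved by others */

	ttm_bo_unreserve(bo);
	return 0;
}
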
 
@@ -819 +894 @@
 /*
  * ttm_bo_util.c
  */
 
 /**
  * ttm_bo_move_ttm
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
@@ -837 +912 @@
  * Returns:
  * !0: Failure.
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool no_wait,
+			   bool evict, bool no_wait_gpu,
 			   struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Fallback move function for a mappable buffer object in mappable memory.
@@ -859 +934 @@
  * Returns:
  * !0: Failure.
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict,
-			      bool no_wait, struct ttm_mem_reg *new_mem);
+			      bool evict, bool no_wait_gpu,
+			      struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_free_old_node
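
A driver move() matching the new signature typically tries an accelerated copy and falls back to ttm_bo_move_memcpy(). A sketch; my_hw_copy() is a hypothetical hardware-blit path:

static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
		      bool interruptible, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	if (my_hw_copy(bo, evict, no_wait_gpu, new_mem) == 0)
		return 0;	/* hardware blit path worked */

	/* CPU fallback declared in this file */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
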
@@ -876 +951 @@
 /**
  * ttm_bo_move_accel_cleanup.
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @sync_obj: A sync object that signals when moving is complete.
- * @sync_obj_arg: An argument to pass to the sync object idle / wait
- * functions.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
  * has been scheduled. The function will create a new temporary buffer object
  * representing the old placement, and put the sync object on both buffer
@@ -892 +965 @@
  * buffer moves.
  */
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 				     void *sync_obj,
-				     void *sync_obj_arg,
-				     bool evict, bool no_wait,
+				     bool evict, bool no_wait_gpu,
 				     struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
  *
  * @c_state: Caching state.
  * @tmp: Page protection flag for a normal, cached mapping.
  *
  * Utility function that returns the pgprot_t that should be used for
  * setting up a PTE with the caching model indicated by @c_state.
  */
-extern pgprot_t ttm_io_prot(enum ttm_caching_state c_state, pgprot_t tmp);
+extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+
+extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
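
ttm_io_prot() now keys off the TTM_PL_FLAG_* caching bits rather than enum ttm_caching_state. A typical call when building a kernel mapping, assuming TTM_PL_MASK_CACHING from ttm_placement.h and a struct ttm_mem_reg *mem in scope:

	pgprot_t prot = ttm_io_prot(mem->placement & TTM_PL_MASK_CACHING,
				    PAGE_KERNEL);
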
 
@@ -911 +985 @@
 #if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
 #define TTM_HAS_AGP
 #include <linux/agp_backend.h>
 
 /**
- * ttm_agp_backend_init
+ * ttm_agp_tt_create
  *
  * @bdev: Pointer to a struct ttm_bo_device.
  * @bridge: The agp bridge this device is sitting on.
+ * @size: Size of the data needed backing.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @dummy_read_page: See struct ttm_bo_device.
+ *
  *
  * Create a TTM backend that uses the indicated AGP bridge as an aperture
  * for TT memory. This function uses the linux agpgart interface to
  * bind and unbind memory backing a ttm_tt.
  */
-extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+					struct agp_bridge_data *bridge,
+					unsigned long size, uint32_t page_flags,
+					struct page *dummy_read_page);
+int ttm_agp_tt_populate(struct ttm_tt *ttm);