/**************************************************************************
 *
 * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_memory.h>
#include <ttm/ttm_module.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
//#include <linux/workqueue.h>
//#include <linux/fs.h>
#include <linux/spinlock.h>

struct ttm_backend_func {
	/**
	 * struct ttm_backend_func member bind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
	 * memory type and location for binding.
	 *
	 * Bind the backend pages into the aperture in the location
	 * indicated by @bo_mem. This function should be able to handle
	 * differences between aperture and system page sizes.
	 */
	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

	/**
	 * struct ttm_backend_func member unbind
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Unbind previously bound backend pages. This function should be
	 * able to handle differences between aperture and system page sizes.
	 */
	int (*unbind) (struct ttm_tt *ttm);

	/**
	 * struct ttm_backend_func member destroy
	 *
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy,
	 * so don't call ttm_tt_destroy from the callback or it will
	 * recurse forever.
	 */
	void (*destroy) (struct ttm_tt *ttm);
};
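
/*
 * Illustrative sketch only, not part of this header: a driver would
 * typically populate the table with its own helpers (the mydrv_*
 * names below are hypothetical) and point ttm_tt::func at it from
 * its ttm_tt_create callback:
 *
 *	static struct ttm_backend_func mydrv_backend_func = {
 *		.bind = &mydrv_ttm_backend_bind,
 *		.unbind = &mydrv_ttm_backend_unbind,
 *		.destroy = &mydrv_ttm_backend_destroy,
 *	};
 */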

#define TTM_PAGE_FLAG_WRITE           (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
#define TTM_PAGE_FLAG_DMA32           (1 << 7)
#define TTM_PAGE_FLAG_SG              (1 << 8)

enum ttm_caching_state {
	tt_uncached,
	tt_wc,
	tt_cached
};

/**
 * struct ttm_tt
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @func: Pointer to a struct ttm_backend_func that describes
 * the backend methods.
 * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
 * pointer.
 * @pages: Array of pages backing the data.
 * @num_pages: Number of pages in the page array.
 * @swap_storage: Pointer to shmem struct file for swap storage.
 * @caching_state: The current caching state of the pages.
 * @state: The current binding state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */

struct ttm_tt {
	struct ttm_bo_device *bdev;
	struct ttm_backend_func *func;
	struct page *dummy_read_page;
	struct page **pages;
	uint32_t page_flags;
	unsigned long num_pages;
	struct sg_table *sg; /* for SG objects via dma-buf */
	struct ttm_bo_global *glob;
	struct file *swap_storage;
	enum ttm_caching_state caching_state;
	enum {
		tt_bound,
		tt_unbound,
		tt_unpopulated,
	} state;
};

/**
 * struct ttm_dma_tt
 *
 * @ttm: Base ttm_tt struct.
 * @dma_address: The DMA (bus) addresses of the pages.
 * @pages_list: Used by some page allocation backends.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
 * memory.
 */
struct ttm_dma_tt {
	struct ttm_tt ttm;
	dma_addr_t *dma_address;
	struct list_head pages_list;
};
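
/*
 * Sketch of a common access pattern (an assumption mirroring in-tree
 * TTM drivers, not an API defined here): since @ttm is the first
 * member, a backend handed a struct ttm_tt pointer can recover the
 * DMA-aware wrapper with container_of():
 *
 *	struct ttm_dma_tt *dma_ttm = container_of(ttm, struct ttm_dma_tt, ttm);
 *	dma_addr_t addr = dma_ttm->dma_address[page_index];
 */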

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
	/**
	 * struct ttm_mem_type_manager member init
	 *
	 * @man: Pointer to a memory type manager.
	 * @p_size: Implementation dependent, but typically the size of the
	 * range to be managed in pages.
	 *
	 * Called to initialize a private range manager. The function is
	 * expected to initialize the man::priv member.
	 * Returns 0 on success, negative error code on failure.
	 */
	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

	/**
	 * struct ttm_mem_type_manager member takedown
	 *
	 * @man: Pointer to a memory type manager.
	 *
	 * Called to undo the setup done in init. All allocated resources
	 * should be freed.
	 */
	int  (*takedown)(struct ttm_mem_type_manager *man);

	/**
	 * struct ttm_mem_type_manager member get_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @bo: Pointer to the buffer object we're allocating space for.
	 * @placement: Placement details.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function should allocate space in the memory type managed
	 * by @man. Placement details, if applicable, are given by
	 * @placement. If successful, @mem::mm_node should be set to a
	 * non-null value, @mem::start should be set to a value identifying
	 * the beginning of the range allocated, and the function should
	 * return zero.
	 * If the memory region can't accommodate the buffer object,
	 * @mem::mm_node should be set to NULL, and the function should
	 * return 0.
	 * If a system error occurred, preventing the request from being
	 * fulfilled, the function should return a negative error code.
	 *
	 * Note that @mem::mm_node will only be dereferenced by
	 * struct ttm_mem_type_manager functions and optionally by the driver,
	 * which has knowledge of the underlying type.
	 *
	 * This function may not be called from within atomic context, so
	 * an implementation can and must use either a mutex or a spinlock to
	 * protect any data structures managing the space.
	 */
	int  (*get_node)(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member put_node
	 *
	 * @man: Pointer to a memory type manager.
	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
	 *
	 * This function frees memory type resources previously allocated
	 * and that are identified by @mem::mm_node and @mem::start. May not
	 * be called from within atomic context.
	 */
	void (*put_node)(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem);

	/**
	 * struct ttm_mem_type_manager member debug
	 *
	 * @man: Pointer to a memory type manager.
	 * @prefix: Prefix to be used in printout to identify the caller.
	 *
	 * This function is called to print out the state of the memory
	 * type manager to aid debugging of out-of-memory conditions.
	 * It may not be called from within atomic context.
	 */
	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
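
/*
 * Typical usage sketch (an assumption based on in-tree TTM drivers,
 * not mandated by this header): rather than implementing these hooks
 * itself, a driver usually plugs in TTM's generic range manager,
 * declared as ttm_bo_manager_func near the end of this file, from its
 * init_mem_type callback:
 *
 *	man->func = &ttm_bo_manager_func;
 *
 * The TTM core then invokes man->func->init(man, p_size) when the
 * memory type is set up, and get_node / put_node as buffers come
 * and go.
 */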

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above.
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures.
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @lru: The lru list for this memory type.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
	struct ttm_bo_device *bdev;

	/*
	 * No protection. Constant from start.
	 */

	bool has_type;
	bool use_type;
	uint32_t flags;
	unsigned long gpu_offset;
	uint64_t size;
	uint32_t available_caching;
	uint32_t default_caching;
	const struct ttm_mem_type_manager_func *func;
	void *priv;
	struct mutex io_reserve_mutex;
	bool use_io_reserve_lru;
	bool io_reserve_fastpath;

	/*
	 * Protected by @io_reserve_mutex:
	 */

	struct list_head io_reserve_lru;

	/*
	 * Protected by the global->lru_lock.
	 */

	struct list_head lru;
};

/**
 * struct ttm_bo_driver
 *
 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 * @sync_obj_signaled: See ttm_fence_api.h
 * @sync_obj_wait: See ttm_fence_api.h
 * @sync_obj_flush: See ttm_fence_api.h
 * @sync_obj_unref: See ttm_fence_api.h
 * @sync_obj_ref: See ttm_fence_api.h
 */

struct ttm_bo_driver {
	/**
	 * ttm_tt_create
	 *
	 * @bdev: pointer to a struct ttm_bo_device.
	 * @size: Size of the data needed backing.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 * @dummy_read_page: See struct ttm_bo_device.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
					unsigned long size,
					uint32_t page_flags,
					struct page *dummy_read_page);

	/**
	 * ttm_tt_populate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_tt *ttm);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member invalidate_caches
	 *
	 * @bdev: the buffer object device.
	 * @flags: new placement of the rebound buffer object.
	 *
	 * A previously evicted buffer has been rebound in a
	 * potentially new location. Tell the driver that it might
	 * consider invalidating read (texture) caches on the next command
	 * submission as a consequence.
	 */

	int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags);
	int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type,
			      struct ttm_mem_type_manager *man);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags.
	 */

	void (*evict_flags) (struct ttm_buffer_object *bo,
			     struct ttm_placement *placement);
	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @interruptible: Use interruptible sleeps if possible when sleeping.
	 * @no_wait_gpu: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct ttm_buffer_object *bo,
		     bool evict, bool interruptible,
		     bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem);

	/**
	 * struct ttm_bo_driver member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access) (struct ttm_buffer_object *bo,
			      struct file *filp);

	/**
	 * In case a driver writer dislikes the TTM fence objects,
	 * the driver writer can replace those with sync objects of
	 * his / her own. If it turns out that no driver writer is
	 * using these, I suggest we remove these hooks and plug in
	 * fences directly. The bo driver needs the following functionality:
	 * See the corresponding functions in the fence object API
	 * documentation.
	 */

	bool (*sync_obj_signaled) (void *sync_obj);
	int (*sync_obj_wait) (void *sync_obj,
			      bool lazy, bool interruptible);
	int (*sync_obj_flush) (void *sync_obj);
	void (*sync_obj_unref) (void **sync_obj);
	void *(*sync_obj_ref) (void *sync_obj);

	/* hook to notify driver about a driver move so it
	 * can do tiling things */
	void (*move_notify)(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *new_mem);
	/* notify the driver we are taking a fault on this BO
	 * and have reserved it */
	int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify) (struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is no longer in use. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
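
/*
 * Sketch of how a driver might fill in this vtable (hedged, modelled
 * on in-tree TTM users; every mydrv_* symbol is hypothetical):
 *
 *	static struct ttm_bo_driver mydrv_bo_driver = {
 *		.ttm_tt_create = &mydrv_ttm_tt_create,
 *		.ttm_tt_populate = &mydrv_ttm_tt_populate,
 *		.ttm_tt_unpopulate = &mydrv_ttm_tt_unpopulate,
 *		.invalidate_caches = &mydrv_invalidate_caches,
 *		.init_mem_type = &mydrv_init_mem_type,
 *		.evict_flags = &mydrv_evict_flags,
 *		.move = &mydrv_bo_move,
 *		.verify_access = &mydrv_verify_access,
 *		.io_mem_reserve = &mydrv_ttm_io_mem_reserve,
 *		.io_mem_free = &mydrv_ttm_io_mem_free,
 *	};
 *
 * Hooks documented above as optional (e.g. .move, .verify_access) may
 * simply be left NULL.
 */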

/**
 * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
 */

struct ttm_bo_global_ref {
	struct drm_global_reference ref;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @shrink: A shrink callback object used for buffer object swap.
 * @device_list_mutex: Mutex protecting the device list.
 * This mutex is held while traversing the device list for pm options.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices.
 * @swap_lru: Lru list of buffer objects used for swapping.
 */

struct ttm_bo_global {

	/**
	 * Constant after init.
	 */

//   struct kobject kobj;
	struct ttm_mem_global *mem_glob;
	struct page *dummy_read_page;
	struct ttm_mem_shrink shrink;
	struct mutex device_list_mutex;
	spinlock_t lru_lock;

	/**
	 * Protected by device_list_mutex.
	 */
	struct list_head device_list;

	/**
	 * Protected by the lru_lock.
	 */
	struct list_head swap_lru;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
};

#define TTM_NUM_MEM_TYPES 8

#define TTM_BO_PRIV_FLAG_MOVING  0	/* Buffer object is moving and needs
					   idling before CPU mapping */
#define TTM_BO_PRIV_FLAG_MAX 1
/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @fence_lock: Protects the synchronizing members on *all* bos belonging
 * to this device.
 * @addr_space_mm: Range manager for the device address space.
 * @lru_lock: Spinlock that protects the buffer+device lru lists and
 * ddestroy lists.
 * @val_seq: Current validation sequence.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 *
 */

struct ttm_bo_device {

	/*
	 * Constant after bo device init / atomic.
	 */
	struct list_head device_list;
	struct ttm_bo_global *glob;
	struct ttm_bo_driver *driver;
	rwlock_t vm_lock;
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
	spinlock_t fence_lock;
	/*
	 * Protected by the vm lock.
	 */
	struct rb_root addr_space_rb;
	struct drm_mm addr_space_mm;

	/*
	 * Protected by the global::lru_lock.
	 */
	struct list_head ddestroy;
	uint32_t val_seq;

	/*
	 * Protected by load / firstopen / lastclose / unload sync.
	 */

	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */

	struct delayed_work wq;

	bool need_dma32;
};

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
	*old ^= (*old ^ new) & mask;
	return *old;
}
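
/*
 * Worked example: (*old ^ new) is 1 exactly where the two values
 * differ; AND-ing with @mask and XOR-ing back into *old therefore
 * flips only the differing bits inside @mask, i.e. bits in @mask are
 * taken from @new and all other bits keep their old value.
 * With *old = 0xA (1010b), new = 0x6 (0110b) and mask = 0x4 (0100b),
 * the result is 0xE (1110b): bit 2 came from @new, the rest from *old.
 */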

/**
 * ttm_tt_init
 *
 * @ttm: The struct ttm_tt.
 * @bdev: pointer to a struct ttm_bo_device.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a struct ttm_tt to back data with system memory pages.
 * No pages are actually allocated.
 * Returns:
 * -ENOMEM: Out of memory.
 */
extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
			unsigned long size, uint32_t page_flags,
			struct page *dummy_read_page);
extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
			   unsigned long size, uint32_t page_flags,
			   struct page *dummy_read_page);
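
/*
 * Sketch of a driver ttm_tt_create callback built on ttm_tt_init
 * (an assumption following the usual in-tree pattern; mydrv_ttm_tt
 * and mydrv_backend_func are hypothetical):
 *
 *	struct mydrv_ttm_tt {
 *		struct ttm_tt ttm;
 *	};
 *
 *	static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_bo_device *bdev,
 *						  unsigned long size,
 *						  uint32_t page_flags,
 *						  struct page *dummy_read_page)
 *	{
 *		struct mydrv_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (tt == NULL)
 *			return NULL;
 *		tt->ttm.func = &mydrv_backend_func;
 *		if (ttm_tt_init(&tt->ttm, bdev, size, page_flags,
 *				dummy_read_page)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */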

/**
 * ttm_tt_fini
 *
 * @ttm: the ttm_tt structure.
 *
 * Free memory of the ttm_tt structure.
 */
extern void ttm_tt_fini(struct ttm_tt *ttm);
extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);

/**
 * ttm_tt_bind:
 *
 * @ttm: The struct ttm_tt containing backing pages.
 * @bo_mem: The struct ttm_mem_reg identifying the binding location.
 *
 * Bind the pages of @ttm to an aperture location identified by @bo_mem.
 */
extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);

/**
 * ttm_tt_destroy:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind, unpopulate and destroy common struct ttm_tt.
 */
extern void ttm_tt_destroy(struct ttm_tt *ttm);

/**
 * ttm_tt_unbind:
 *
 * @ttm: The struct ttm_tt.
 *
 * Unbind a struct ttm_tt.
 */
extern void ttm_tt_unbind(struct ttm_tt *ttm);

/**
 * ttm_tt_swapin:
 *
 * @ttm: The struct ttm_tt.
 *
 * Swap in a previously swapped-out ttm_tt.
 */
extern int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * ttm_tt_cache_flush:
 *
 * @pages: An array of pointers to struct page to flush.
 * @num_pages: Number of pages to flush.
 *
 * Flush the data of the indicated pages from the cpu caches.
 * This is used when changing caching attributes of the pages from
 * cache-coherent.
 */
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages);

/**
 * ttm_tt_set_placement_caching:
 *
 * @ttm: A struct ttm_tt the backing pages of which will change caching policy.
 * @placement: Flag indicating the desired caching policy.
 *
 * This function will change the caching policy of any default kernel mappings
 * of the pages backing @ttm. If changing from cached to uncached or
 * write-combined, all CPU caches will first be flushed to make sure the
 * data of the pages hit RAM. This function may be very costly as it involves
 * global TLB and cache flushes and potential page splitting / combining.
 */
extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct file *persistent_swap_storage);

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
				   struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg.
 * @interruptible: Sleep interruptible when sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @mem, potentially evicting other idle buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if @no_wait_gpu == 1).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
				struct ttm_placement *placement,
				struct ttm_mem_reg *mem,
				bool interruptible,
				bool no_wait_gpu);

extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);

extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);

extern int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @glob: A pointer to an initialized struct ttm_bo_global.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @file_page_offset: Offset into the device address space that is available
 * for buffer data. This ensures compatibility with other users of the
 * address space.
 *
 * Initializes a struct ttm_bo_device.
 * Returns:
 * !0: Failure.
 */
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
			      struct ttm_bo_global *glob,
			      struct ttm_bo_driver *driver,
			      uint64_t file_page_offset, bool need_dma32);
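
/*
 * Bring-up sketch (hedged; mydrv and MYDRV_FILE_PAGE_OFFSET are
 * hypothetical): the global object is obtained first through a
 * struct ttm_bo_global_ref registered with drm_global_item_ref(),
 * using ttm_bo_global_init / ttm_bo_global_release as its hooks, then:
 *
 *	r = ttm_bo_device_init(&mydrv->bdev,
 *			       mydrv->bo_global_ref.ref.object,
 *			       &mydrv_bo_driver,
 *			       MYDRV_FILE_PAGE_OFFSET,
 *			       mydrv->need_dma32);
 *	if (r)
 *		return r;
 */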

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
			   bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 * @sequence: The caller's validation sequence number.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted.) The following algorithm prevents such deadlocks from
 * occurring:
 * 1) Buffers are reserved with the lru spinlock held. Upon successful
 * reservation they are removed from the lru list. This stops a reserved buffer
 * from being evicted. However the lru spinlock is released between the time
 * a buffer is selected for eviction and the time it is reserved.
 * Therefore a check is made when a buffer is reserved for eviction, that it
 * is still the first buffer in the lru list, before it is removed from the
 * list. @check_lru == 1 forces this check. If it fails, the function returns
 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
 * the procedure.
 * 2) Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique 32-bit
 * validation sequence number,
 * and call this function with @use_sequence == 1 and @sequence == the unique
 * sequence number. If upon call of this function, the buffer object is already
 * reserved, the validation sequence is checked against the validation
 * sequence of the process currently reserving the buffer,
 * and if the current validation sequence is greater than that of the process
 * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps
 * waiting for the buffer to become unreserved, after which it retries
 * reserving.
 * The caller should, when receiving an -EAGAIN error,
 * release all its buffer reservations, wait for @bo to become unreserved, and
 * then rerun the validation with the same validation sequence. This procedure
 * will always guarantee that the process with the lowest validation sequence
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again (only if @use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_sequence is set to true.
 */
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence);
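
/*
 * Usage sketch of the back-off protocol described above (simplified
 * and illustrative only): a multi-buffer submission reserves each
 * buffer with its validation sequence and unwinds on -EAGAIN:
 *
 *	ret = ttm_bo_reserve(bo, true, false, true, val_seq);
 *	if (ret == -EAGAIN) {
 *		back off: unreserve everything already held, wait with
 *		ttm_bo_wait_unreserved(bo, true), then retry the whole
 *		list with the same val_seq;
 *	}
 *	...
 *	ttm_bo_unreserve(bo);
 */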

/**
 * ttm_bo_reserve_locked:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @use_sequence: If @bo is already reserved, only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 * @sequence: The caller's validation sequence number.
 *
 * Must be called with struct ttm_bo_global::lru_lock held,
 * and will not remove reserved buffers from the lru lists.
 * The function may release the LRU spinlock if it needs to sleep.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EAGAIN: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again (only if @use_sequence == 1).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_sequence is set to true.
 */
extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
				 bool interruptible,
				 bool no_wait, bool use_sequence,
				 uint32_t sequence);

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unreserve_locked
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 * Needs to be called with struct ttm_bo_global::lru_lock held.
 */
extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);

/**
 * ttm_bo_wait_unreserved
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Wait for a struct ttm_buffer_object to become unreserved.
 * This is typically used in the execbuf code to relax cpu-usage when
 * backing off from a potential deadlock condition.
 */
extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
				  bool interruptible);

/*
 * ttm_bo_util.c
 */

/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
			   bool evict, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @evict: 1: This is an eviction. Don't try to pipeline.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
			      bool evict, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem);
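
/*
 * Sketch of a driver move callback using the memcpy path as a
 * fallback (hedged; mydrv_copy stands in for a hypothetical hardware
 * blit):
 *
 *	static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
 *				 bool interruptible, bool no_wait_gpu,
 *				 struct ttm_mem_reg *new_mem)
 *	{
 *		int r = mydrv_copy(bo, evict, no_wait_gpu, new_mem);
 *
 *		if (r)
 *			r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 *		return r;
 *	}
 */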

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @sync_obj: A sync object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */

extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
				     void *sync_obj,
				     bool evict, bool no_wait_gpu,
				     struct ttm_mem_reg *new_mem);
/**
 * ttm_io_prot
 *
 * @caching_flags: Caching flags indicating the desired caching model.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define TTM_HAS_AGP
#include <linux/agp_backend.h>

/**
 * ttm_agp_tt_create
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @bridge: The agp bridge this device is sitting on.
 * @size: Size of the data needed backing.
 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
 * @dummy_read_page: See struct ttm_bo_device.
 *
 * Create a TTM backend that uses the indicated AGP bridge as an aperture
 * for TT memory. This function uses the linux agpgart interface to
 * bind and unbind memory backing a ttm_tt.
 */
extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
					struct agp_bridge_data *bridge,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page);
int ttm_agp_tt_populate(struct ttm_tt *ttm);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif

#endif /* _TTM_BO_DRIVER_H_ */