/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifndef KGEM_H
#define KGEM_H

#define HAS_DEBUG_FULL 1

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <assert.h>

#include "i915_drm.h"

#include "compiler.h"
#include "intel_list.h"


#if HAS_DEBUG_FULL
#define DBG(x) printf x
#else
#define DBG(x)
#endif

struct kgem_bo {
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
	struct drm_i915_gem_exec_object2 *exec;
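
	/* The low two bits of rq double as a tag for the ring the request
	 * runs on (see RQ_RING/RQ_IS_BLT above); this relies on request
	 * allocations being at least 4-byte aligned. Unwrap with RQ()
	 * before dereferencing, e.g.:
	 *
	 *	struct kgem_request *rq = RQ(bo->rq);
	 */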

	struct kgem_bo *proxy;

	struct list list;
	struct list request;
	struct list vma;

	void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
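
	/* Bit 0 of the map cookie records how the bo was mapped: set for a
	 * CPU mmap, clear for a GTT mmap (see IS_CPU_MAP/IS_GTT_MAP above),
	 * so the low bit must be masked off before the pointer is used.
	 */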

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t target_handle;
	uint32_t presumed_offset;
	uint32_t delta;
	union {
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
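
	/* Size is tracked either as whole pages plus a cache-bucket index,
	 * or as raw bytes for proxy buffers. With PAGE_SIZE 4096 (1 << 12)
	 * and NUM_CACHE_BUCKETS 16, MAX_CACHE_SIZE works out to
	 * 1 << (16 + 12) bytes, i.e. 256MiB.
	 */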
	uint32_t pitch  : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t dirty  : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;
	uint32_t io     : 1;
	uint32_t flush  : 1;
	uint32_t scanout : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3

struct kgem_request {
	struct list list;
	struct kgem_bo *bo;
	struct list buffers;
	int ring;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};

struct kgem {
	int fd;
	int wedged;
	unsigned gen;

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	struct list requests[2];
	struct kgem_request *next_request;
	struct kgem_request static_request;

	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;
	uint16_t min_alignment;

	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_cacheing :1;
	uint32_t has_llc :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;

	uint32_t can_blt_cpu :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

	uint32_t batch[64*1024-8];
	struct drm_i915_gem_exec_object2 exec[256];
	struct drm_i915_gem_relocation_entry reloc[4096];
	uint16_t reloc__self[256];

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED 4
#define KGEM_EXEC_RESERVED 1

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif

#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
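
/* Each array keeps a few slots in reserve: KGEM_BATCH_RESERVED leaves room
 * for the terminating MI_BATCH_BUFFER_END dword (an assumption based on the
 * single reserved entry), while the reloc/exec reservations cover entries
 * appended as a batch is being closed.
 */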

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length);

int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU     0x1
#define KGEM_CAN_CREATE_CPU     0x2
#define KGEM_CAN_CREATE_LARGE	0x4
#define KGEM_CAN_CREATE_GTT	0x8

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);
enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
	CREATE_PRIME = 0x20,
	CREATE_TEMPORARY = 0x40,
	CREATE_CACHED = 0x80,
	CREATE_NO_RETIRE = 0x100,
	CREATE_NO_THROTTLE = 0x200,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);
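
/* Illustrative allocation (a sketch only; real callers live in the .c files):
 *
 *	struct kgem_bo *bo = kgem_create_2d(kgem, width, height, 32,
 *					    I915_TILING_X,
 *					    CREATE_INACTIVE | CREATE_GTT_MAP);
 *	if (bo == NULL)
 *		return false;
 */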

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
int kgem_bo_get_swizzling(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_retire(struct kgem *kgem);

bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
{
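	/* Collapse the ring id to an index into kgem->requests[2]:
	 * the BLT ring maps to 1, every other ring to 0.
	 */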
	ring = ring == KGEM_BLT;

	if (list_is_empty(&kgem->requests[ring]))
		return true;

	return __kgem_ring_is_idle(kgem, ring);
}

static inline bool kgem_is_idle(struct kgem *kgem)
{
	if (!kgem->need_retire)
		return true;

	return kgem_ring_is_idle(kgem, kgem->ring);
}

void _kgem_submit(struct kgem *kgem);
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch)
		_kgem_submit(kgem);
}

static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
	if (kgem->nreloc == 0)
		return false;

	return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}

#if 0

static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_submit(kgem, bo);

	if (!bo->needs_flush)
		return;

	/* If the kernel fails to emit the flush, then it will be forced when
	 * we assume direct access. And as the usual failure is EIO, we do
	 * not actually care.
	 */
	__kgem_flush(kgem, bo);
}

#endif

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	bo->refcnt++;
	return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->mode == mode)
		return;

//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
	assert(kgem->nbatch == 0);
	assert(!kgem->wedged);
//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	assert(num_dwords > 0);
	assert(kgem->nbatch < kgem->surface);
	assert(kgem->surface <= kgem->batch_size);
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}
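
/* The batch fills from both ends: commands advance nbatch upwards from
 * batch[0] while surface state is allocated downwards from batch_size via
 * surface, so the check above guards against the two regions colliding.
 */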

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces) &&
		kgem_check_exec(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
	return kgem->batch + kgem->nbatch;
}

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);
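
/* Typical emit sequence (an illustrative sketch, not code from this file):
 *
 *	if (!kgem_check_batch(kgem, 2) || !kgem_check_reloc_and_exec(kgem, 1))
 *		_kgem_submit(kgem);
 *
 *	b = kgem->batch + kgem->nbatch;
 *	b[0] = cmd;
 *	b[1] = kgem_add_reloc(kgem, kgem->nbatch + 1, bo,
 *			      I915_GEM_DOMAIN_RENDER << 16 |
 *			      I915_GEM_DOMAIN_RENDER,
 *			      0);
 *	kgem->nbatch += 2;
 */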

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
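
/* Sketch of the usual CPU access pattern: map, then sync so the bo is in
 * the CPU domain before the pages are touched (names as declared above):
 *
 *	void *ptr = kgem_bo_map__cpu(kgem, bo);
 *	if (ptr) {
 *		kgem_bo_sync__cpu(kgem, bo);
 *		memcpy(ptr, data, length);
 *	}
 */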
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size);

static inline int __kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy != NULL);
	return bo->size.bytes;
}

static inline int __kgem_bo_size(struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_bo_size(struct kgem_bo *bo)
{
	if (bo->proxy)
		return __kgem_buffer_size(bo);
	else
		return __kgem_bo_size(bo);
}

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 040 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/

static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
					 struct kgem_bo *bo)
{
	if (bo->domain == DOMAIN_GTT)
		return true;

	if (kgem->gen < 040 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
	assert(bo->refcnt);

	if (bo->map == NULL)
		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);
	return bo->rq;
}

/*
static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);

	if (bo->exec)
		return true;

	if (kgem_flush(kgem, bo->flush))
		kgem_submit(kgem);

	if (bo->rq && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	return kgem_bo_is_busy(bo);
}
*/

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return false;

	assert(bo->refcnt);
	return bo->dirty;
}

static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}

static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	do {
		assert(bo->exec);
		assert(bo->rq);

		if (bo->dirty)
			return;

		__kgem_bo_mark_dirty(bo);
	} while ((bo = bo->proxy));
}

#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
				   uint32_t size, uint32_t flags,
				   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret);
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
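
/* Sketch of an upload through the buffer interface (illustrative only;
 * assumes bo->pitch is in bytes):
 *
 *	void *ptr;
 *	struct kgem_bo *bo = kgem_create_buffer_2d(kgem, width, height, 32,
 *						   KGEM_BUFFER_WRITE, &ptr);
 *	if (bo != NULL) {
 *		memcpy(ptr, pixels, height * bo->pitch);
 *		... use bo as a source ...
 *		kgem_bo_destroy(kgem, bo);
 *	}
 */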

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

#if HAS_DEBUG_FULL
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif

#endif /* KGEM_H */