/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifndef KGEM_H
#define KGEM_H

#define HAS_DEBUG_FULL 0

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <assert.h>

#include <i915_drm.h>

#include "compiler.h"
#include "intel_list.h"

static inline void delay(uint32_t time)
{
	__asm__ __volatile__(
	"int $0x40"
	::"a"(5), "b"(time)
	:"memory");
}
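
/*
 * delay() issues KolibriOS system call 5 via int $0x40: eax selects the
 * function and ebx carries the pause length, in 1/100-second ticks under
 * the convention assumed here, so delay(20) sleeps for roughly 200 ms.
 */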

#undef  DBG

#if HAS_DEBUG_FULL
#define DBG(x) printf x
#else
#define DBG(x)
#endif
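
/*
 * DBG() takes a doubly-parenthesised printf argument list so the whole call
 * disappears when HAS_DEBUG_FULL is 0, e.g.
 *
 *   DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
 */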

struct kgem_bo {
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
	struct drm_i915_gem_exec_object2 *exec;

	struct kgem_bo *proxy;

	struct list list;
	struct list request;
	struct list vma;

	void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t target_handle;
	uint32_t presumed_offset;
	uint32_t delta;
	union {
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
	uint32_t pitch  : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t dirty  : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;
	uint32_t io     : 1;
	uint32_t flush  : 1;
	uint32_t scanout : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3
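
/*
 * Notes on the encodings above:
 *
 * - bo->rq is a tagged pointer: its low two bits carry the ring the bo was
 *   last queued on, so RQ() recovers the request and RQ_RING() the ring.
 * - bo->map is tagged the same way: bit 0 set marks a CPU mmap, a non-NULL
 *   pointer with bit 0 clear marks a GTT mmap (IS_CPU_MAP()/IS_GTT_MAP()).
 * - size.pages.bucket indexes the per-size bo cache lists in struct kgem
 *   below; with 16 buckets of 4 KiB pages, MAX_CACHE_SIZE evaluates to
 *   1 << 28, i.e. 256 MiB.
 */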

struct kgem_request {
	struct list list;
	struct kgem_bo *bo;
	struct list buffers;
	int ring;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};

struct kgem {
	int fd;
	int wedged;
	unsigned gen;

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	struct list requests[2];
	struct kgem_request *next_request;
	struct kgem_request static_request;

	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;
	uint16_t min_alignment;

	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_cacheing :1;
	uint32_t has_llc :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;

	uint32_t can_blt_cpu :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

	uint32_t batch[64*1024-8];
	struct drm_i915_gem_exec_object2 exec[256];
	struct drm_i915_gem_relocation_entry reloc[4096];
	uint16_t reloc__self[256];

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED 4
#define KGEM_EXEC_RESERVED 1

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif

#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
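
/*
 * With the array sizes declared in struct kgem, KGEM_EXEC_SIZE() works out
 * to 256 - 1 = 255 exec slots and KGEM_RELOC_SIZE() to 4096 - 4 = 4092
 * relocations; the reserved entries presumably keep room for finishing the
 * batch (e.g. the batch-buffer object itself and its closing dwords).
 */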

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length);


int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU     0x1
#define KGEM_CAN_CREATE_CPU     0x2
#define KGEM_CAN_CREATE_LARGE	0x4
#define KGEM_CAN_CREATE_GTT	0x8

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);
enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
	CREATE_PRIME = 0x20,
	CREATE_TEMPORARY = 0x40,
	CREATE_CACHED = 0x80,
	CREATE_NO_RETIRE = 0x100,
	CREATE_NO_THROTTLE = 0x200,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags);
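
/*
 * Allocation sketch (hypothetical sizes, not taken from this header): a
 * 64x64, 32 bpp untiled target meant to be mapped through the GTT could be
 * requested as
 *
 *   struct kgem_bo *bo = kgem_create_2d(kgem, 64, 64, 32, I915_TILING_NONE,
 *                                       CREATE_INACTIVE | CREATE_GTT_MAP);
 *
 * and released with kgem_bo_destroy(kgem, bo) once the caller is done.
 */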

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
int kgem_bo_get_swizzling(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_retire(struct kgem *kgem);

bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
{
	ring = ring == KGEM_BLT;

	if (list_is_empty(&kgem->requests[ring]))
		return true;

	return __kgem_ring_is_idle(kgem, ring);
}
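
/*
 * kgem->requests[] keeps two lists of outstanding requests: index 1 for the
 * BLT ring and index 0 for everything else, which is why the assignment
 * "ring = ring == KGEM_BLT" above collapses the ring id to a list index.
 */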

static inline bool kgem_is_idle(struct kgem *kgem)
{
	if (!kgem->need_retire)
		return true;

	return kgem_ring_is_idle(kgem, kgem->ring);
}

void _kgem_submit(struct kgem *kgem);
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch)
		_kgem_submit(kgem);
}

static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
	if (kgem->nreloc == 0)
		return false;

	return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}

static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_submit(kgem, bo);

	if (!bo->needs_flush)
		return;

	/* If the kernel fails to emit the flush, then it will be forced when
	 * we assume direct access. And as the usual failure is EIO, we do
	 * not actually care.
	 */
	__kgem_flush(kgem, bo);
}

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	bo->refcnt++;
	return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}
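
/*
 * Every kgem_bo_reference() is expected to be balanced by kgem_bo_destroy();
 * when the count drops to zero the bo is handed to _kgem_bo_destroy(), which
 * either returns it to one of the caches above or releases it for real.
 */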

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->mode == mode)
		return;

//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
	assert(kgem->nbatch == 0);
	assert(!kgem->wedged);
//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	assert(num_dwords > 0);
	assert(kgem->nbatch < kgem->surface);
	assert(kgem->surface <= kgem->batch_size);
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces) &&
		kgem_check_exec(kgem, num_surfaces);
}
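
/*
 * Space-accounting sketch: commands fill kgem->batch[] upwards (nbatch
 * dwords used so far) while kgem->surface marks the start of surface state
 * allocated downwards from the top of the batch, so the checks above keep
 * the two regions from colliding; kgem_check_batch_with_surfaces() further
 * budgets 8 dwords, one relocation and one exec entry per surface.
 */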

static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
	return kgem->batch + kgem->nbatch;
}

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size);

static inline int __kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy != NULL);
	return bo->size.bytes;
}

static inline int __kgem_bo_size(struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_bo_size(struct kgem_bo *bo)
{
	if (bo->proxy)
		return __kgem_buffer_size(bo);
	else
		return __kgem_bo_size(bo);
}
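
/*
 * Size bookkeeping: a proxy bo (one carved out of a target bo by
 * kgem_create_proxy() or the buffer helpers below) records its size in
 * bytes, whereas a regular bo records whole pages; size.pages.count == 3,
 * for example, makes kgem_bo_size() return 3 * PAGE_SIZE = 12288.
 */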

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 040 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/

static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	if (bo->domain == DOMAIN_GTT)
		return true;

	if (kgem->gen < 040 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	DBG(("%s: domain=%d, offset: %d size: %d\n",
	     __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
	assert(bo->refcnt);
	return __kgem_bo_is_mappable(kgem, bo);
}

static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
	assert(bo->refcnt);

	if (bo->map == NULL)
		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
	if (kgem_bo_mapped(kgem, bo))
		return true;

	if (!bo->tiling && kgem->has_llc)
		return true;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
}

static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	while (bo->proxy)
		bo = bo->proxy;
	return bo->snoop;
}

bool __kgem_busy(struct kgem *kgem, int handle);

static inline void kgem_bo_mark_busy(struct kgem_bo *bo, int ring)
{
	bo->rq = (struct kgem_request *)((uintptr_t)bo->rq | ring);
}

inline static void __kgem_bo_clear_busy(struct kgem_bo *bo)
{
	bo->needs_flush = false;
	list_del(&bo->request);
	bo->rq = NULL;
	bo->domain = DOMAIN_NONE;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);
	return bo->rq;
}

/*

static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);

	if (bo->exec)
		return true;

	if (kgem_flush(kgem, bo->flush))
		kgem_submit(kgem);

	if (bo->rq && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	return kgem_bo_is_busy(bo);
}

*/

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return false;

	assert(bo->refcnt);
	return bo->dirty;
}

static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}

static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	do {
		assert(bo->exec);
		assert(bo->rq);

		if (bo->dirty)
			return;

		__kgem_bo_mark_dirty(bo);
	} while ((bo = bo->proxy));
}
661
#define KGEM_BUFFER_WRITE	0x1
662
#define KGEM_BUFFER_INPLACE	0x2
663
#define KGEM_BUFFER_LAST	0x4
664
 
665
#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)
666
 
667
struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
668
				   uint32_t size, uint32_t flags,
669
				   void **ret);
670
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
671
				      int width, int height, int bpp,
672
				      uint32_t flags,
673
				      void **ret);
674
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
675
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
676
 
677
void kgem_throttle(struct kgem *kgem);
678
#define MAX_INACTIVE_TIME 10
679
bool kgem_expire_cache(struct kgem *kgem);
680
void kgem_purge_cache(struct kgem *kgem);
681
void kgem_cleanup_cache(struct kgem *kgem);
682
 
683
#if HAS_DEBUG_FULL
684
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
685
#else
686
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
687
{
688
	(void)kgem;
689
	(void)nbatch;
690
}
691
#endif
692
 
693
#endif /* KGEM_H */