/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifndef KGEM_H
#define KGEM_H

#define HAS_DEBUG_FULL 0

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#include "i915_drm.h"

#include "compiler.h"
#include "intel_list.h"

/* Suspend the calling thread via the KolibriOS int 0x40 interface
 * (system function 5); 'time' is given in timer ticks of 1/100 s. */
static inline void delay(uint32_t time)
{
	__asm__ __volatile__(
	"int $0x40"
	::"a"(5), "b"(time)
	:"memory");
}
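
/* Illustrative only (not part of the original header): a simple polling
 * loop that yields the CPU between checks.
 *
 *     while (!kgem_is_idle(kgem))
 *         delay(1);
 */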


#if HAS_DEBUG_FULL
#define DBG(x) printf x
#else
#define DBG(x)
#endif

struct kgem_bo {
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
	struct drm_i915_gem_exec_object2 *exec;

	struct kgem_bo *proxy;

	struct list list;
	struct list request;
	struct list vma;

	void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t target_handle;
	uint32_t presumed_offset;
	uint32_t delta;
	union {
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
	uint32_t pitch : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t dirty : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;
	uint32_t io : 1;
	uint32_t flush : 1;
	uint32_t scanout : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3

struct kgem_request {
	struct list list;
	struct kgem_bo *bo;
	struct list buffers;
	int ring;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};

struct kgem {
	int fd;
	int wedged;
	unsigned gen;

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	struct list requests[2];
	struct kgem_request *next_request;
	struct kgem_request static_request;

	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;
	uint16_t min_alignment;

	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_cacheing :1;
	uint32_t has_llc :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;

	uint32_t can_blt_cpu :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

	uint32_t batch[64*1024-8];
	struct drm_i915_gem_exec_object2 exec[256];
	struct drm_i915_gem_relocation_entry reloc[4096];
	uint16_t reloc__self[256];

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED 4
#define KGEM_EXEC_RESERVED 1

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif

#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length);


int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU     0x1
#define KGEM_CAN_CREATE_CPU     0x2
#define KGEM_CAN_CREATE_LARGE	0x4
#define KGEM_CAN_CREATE_GTT	0x8

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);
enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
	CREATE_PRIME = 0x20,
	CREATE_TEMPORARY = 0x40,
	CREATE_CACHED = 0x80,
	CREATE_NO_RETIRE = 0x100,
	CREATE_NO_THROTTLE = 0x200,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags);

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
int kgem_bo_get_swizzling(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_retire(struct kgem *kgem);

bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
{
	ring = ring == KGEM_BLT;

	if (list_is_empty(&kgem->requests[ring]))
		return true;

	return __kgem_ring_is_idle(kgem, ring);
}

static inline bool kgem_is_idle(struct kgem *kgem)
{
	if (!kgem->need_retire)
		return true;

	return kgem_ring_is_idle(kgem, kgem->ring);
}

void _kgem_submit(struct kgem *kgem);
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch)
		_kgem_submit(kgem);
}

static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
	if (kgem->nreloc == 0)
		return false;

	return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}

static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_submit(kgem, bo);

	if (!bo->needs_flush)
		return;

	/* If the kernel fails to emit the flush, then it will be forced when
	 * we assume direct access. And as the usual failure is EIO, we do
	 * not actually care.
	 */
	__kgem_flush(kgem, bo);
}

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	bo->refcnt++;
	return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}
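
/* Illustrative sketch of the refcounting contract above: every
 * kgem_bo_reference() must be balanced by a kgem_bo_destroy(); the bo
 * is only released (or returned to the cache) when the last reference
 * is dropped.
 *
 *     struct kgem_bo *hold = kgem_bo_reference(bo);
 *     ...
 *     kgem_bo_destroy(kgem, hold);
 */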

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->mode == mode)
		return;

//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
	assert(kgem->nbatch == 0);
	assert(!kgem->wedged);
//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	assert(num_dwords > 0);
	assert(kgem->nbatch < kgem->surface);
	assert(kgem->surface <= kgem->batch_size);
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces) &&
		kgem_check_exec(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
	return kgem->batch + kgem->nbatch;
}
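
/* Typical emission pattern (an illustrative sketch of assumed usage, not
 * an API defined here): make sure the batch has room, submitting the
 * current one if not, then write dwords and advance nbatch to match.
 *
 *     if (!kgem_check_batch(kgem, 2))
 *         _kgem_submit(kgem);
 *     b = kgem_get_batch(kgem);
 *     b[0] = CMD_DWORD0;   // hypothetical command dwords
 *     b[1] = CMD_DWORD1;
 *     kgem->nbatch += 2;
 */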

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size);

static inline int __kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy != NULL);
	return bo->size.bytes;
}

static inline int __kgem_bo_size(struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_bo_size(struct kgem_bo *bo)
{
	if (bo->proxy)
		return __kgem_buffer_size(bo);
	else
		return __kgem_bo_size(bo);
}
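
/* Size bookkeeping note: a proxy/buffer bo records its size in bytes,
 * a regular bo in whole pages, so e.g. a three-page bo reports
 * 3 * PAGE_SIZE = 12288 bytes from kgem_bo_size(). */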

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 040 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/

static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
					 struct kgem_bo *bo)
{
	if (bo->domain == DOMAIN_GTT)
		return true;

	if (kgem->gen < 040 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	DBG(("%s: domain=%d, offset: %d size: %d\n",
	     __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
	assert(bo->refcnt);
	return __kgem_bo_is_mappable(kgem, bo);
}

static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
	assert(bo->refcnt);

	if (bo->map == NULL)
		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
	if (kgem_bo_mapped(kgem, bo))
		return true;

	if (!bo->tiling && kgem->has_llc)
		return true;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
}

static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	while (bo->proxy)
		bo = bo->proxy;
	return bo->snoop;
}

bool __kgem_busy(struct kgem *kgem, int handle);

static inline void kgem_bo_mark_busy(struct kgem_bo *bo, int ring)
{
	bo->rq = (struct kgem_request *)((uintptr_t)bo->rq | ring);
}

inline static void __kgem_bo_clear_busy(struct kgem_bo *bo)
{
	bo->needs_flush = false;
	list_del(&bo->request);
	bo->rq = NULL;
	bo->domain = DOMAIN_NONE;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);
	return bo->rq;
}

/*

static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);

	if (bo->exec)
		return true;

	if (kgem_flush(kgem, bo->flush))
		kgem_submit(kgem);

	if (bo->rq && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	return kgem_bo_is_busy(bo);
}

*/

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return false;

	assert(bo->refcnt);
	return bo->dirty;
}

static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}

static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	do {
		assert(bo->exec);
		assert(bo->rq);

		if (bo->dirty)
			return;

		__kgem_bo_mark_dirty(bo);
	} while ((bo = bo->proxy));
}

#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
				   uint32_t size, uint32_t flags,
				   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret);
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
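
/* Illustrative upload sketch (assumed usage, not defined by this header):
 * stage data through a write buffer; *ret is expected to receive a
 * CPU-writable pointer backed by the returned bo.
 *
 *     void *ptr;
 *     struct kgem_bo *bo = kgem_create_buffer(kgem, len,
 *                                             KGEM_BUFFER_WRITE, &ptr);
 *     if (bo) {
 *         memcpy(ptr, data, len);
 *         ...
 *         kgem_bo_destroy(kgem, bo);
 *     }
 */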

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

#if HAS_DEBUG_FULL
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif

#endif /* KGEM_H */