/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifndef KGEM_H
#define KGEM_H

#define HAS_DEBUG_FULL 1

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <assert.h>

#include <i915_drm.h>

#include "compiler.h"
#include "intel_list.h"

#undef DBG

#if HAS_DEBUG_FULL
#define DBG(x) printf x
#else
#define DBG(x)
#endif

struct kgem_bo {
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
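	/* The low two bits of bo->rq carry the ring the bo was last queued
	 * on (see kgem_bo_mark_busy()); RQ() masks them off to recover the
	 * request pointer and RQ_RING() extracts them.
	 */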
	struct drm_i915_gem_exec_object2 *exec;

	struct kgem_bo *proxy;

	struct list list;
	struct list request;
	struct list vma;

	void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t target_handle;
	uint32_t presumed_offset;
	uint32_t delta;
	union {
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
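/* With PAGE_SIZE 4096 (1 << 12) and 16 buckets, MAX_CACHE_SIZE works out
 * to 1 << 28 bytes, i.e. 256MiB.
 */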
		} pages;
		uint32_t bytes;
	} size;
	uint32_t pitch  : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t dirty  : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;
	uint32_t io     : 1;
	uint32_t flush  : 1;
	uint32_t scanout : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3

struct kgem_request {
	struct list list;
	struct kgem_bo *bo;
	struct list buffers;
	int ring;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};

struct kgem {
	int fd;
	int wedged;
	unsigned gen;

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	struct list requests[2];
	struct kgem_request *next_request;
	struct kgem_request static_request;

	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)
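/* Local copies of the kernel execbuffer2 flag values: I915_EXEC_SECURE for
 * the batch itself and EXEC_OBJECT_WRITE (1 << 2) for per-object write
 * hints, presumably defined here so the file also builds against older
 * i915_drm.h headers.
 */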
 
	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;
	uint16_t min_alignment;

	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_cacheing :1;
	uint32_t has_llc :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;

	uint32_t can_blt_cpu :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

	uint32_t batch[64*1024-8];
	struct drm_i915_gem_exec_object2 exec[256];
	struct drm_i915_gem_relocation_entry reloc[4096];
	uint16_t reloc__self[256];

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED 4
#define KGEM_EXEC_RESERVED 1

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif

#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)

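/* The *_SIZE() macros above report the usable capacity of each array once
 * the reserved entries are set aside; the single reserved batch dword is
 * presumably kept for the terminating MI_BATCH_BUFFER_END.
 */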
void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length);

int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU     0x1
#define KGEM_CAN_CREATE_CPU     0x2
#define KGEM_CAN_CREATE_LARGE	0x4
#define KGEM_CAN_CREATE_GTT	0x8

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);
enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
	CREATE_PRIME = 0x20,
	CREATE_TEMPORARY = 0x40,
	CREATE_CACHED = 0x80,
	CREATE_NO_RETIRE = 0x100,
	CREATE_NO_THROTTLE = 0x200,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags);

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
int kgem_bo_get_swizzling(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_retire(struct kgem *kgem);

bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
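/* kgem->requests[] holds one list per hardware ring: index 1 for the BLT
 * ring and index 0 for everything else, which is the mapping computed by
 * the "ring == KGEM_BLT" assignment below.
 */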
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
{
	ring = ring == KGEM_BLT;

	if (list_is_empty(&kgem->requests[ring]))
		return true;

	return __kgem_ring_is_idle(kgem, ring);
}

static inline bool kgem_is_idle(struct kgem *kgem)
{
	if (!kgem->need_retire)
		return true;

	return kgem_ring_is_idle(kgem, kgem->ring);
}

void _kgem_submit(struct kgem *kgem);
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch)
		_kgem_submit(kgem);
}

static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
	if (kgem->nreloc == 0)
		return false;

	return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}

static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_submit(kgem, bo);

	if (!bo->needs_flush)
		return;

	/* If the kernel fails to emit the flush, then it will be forced when
	 * we assume direct access. And as the usual failure is EIO, we do
	 * not actually care.
	 */
	__kgem_flush(kgem, bo);
}

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	bo->refcnt++;
	return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->mode == mode)
		return;

//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
	assert(kgem->nbatch == 0);
	assert(!kgem->wedged);
//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

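/* The batch is filled from both ends: commands grow upwards from batch[0]
 * (kgem->nbatch) while surface state is allocated downwards from
 * kgem->surface, so these checks verify that the two ends will not collide.
 */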
static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	assert(num_dwords > 0);
	assert(kgem->nbatch < kgem->surface);
	assert(kgem->surface <= kgem->batch_size);
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces) &&
		kgem_check_exec(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
	return kgem->batch + kgem->nbatch;
}

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size);

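/* bo->size is a union: ordinary bos record their size as a page count,
 * while proxy buffers record a byte length, hence the two accessors below.
 */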
static inline int __kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy != NULL);
	return bo->size.bytes;
}

static inline int __kgem_bo_size(struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_bo_size(struct kgem_bo *bo)
{
	if (bo->proxy)
		return __kgem_buffer_size(bo);
	else
		return __kgem_bo_size(bo);
}

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 040 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/

static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
					 struct kgem_bo *bo)
{
	if (bo->domain == DOMAIN_GTT)
		return true;

	if (kgem->gen < 040 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	DBG(("%s: domain=%d, offset: %d size: %d\n",
	     __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
	assert(bo->refcnt);
	return __kgem_bo_is_mappable(kgem, bo);
}

static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
	assert(bo->refcnt);

	if (bo->map == NULL)
		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
	if (kgem_bo_mapped(kgem, bo))
		return true;

	if (!bo->tiling && kgem->has_llc)
		return true;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
}

static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	while (bo->proxy)
		bo = bo->proxy;
	return bo->snoop;
}

bool __kgem_busy(struct kgem *kgem, int handle);

static inline void kgem_bo_mark_busy(struct kgem_bo *bo, int ring)
{
	bo->rq = (struct kgem_request *)((uintptr_t)bo->rq | ring);
}

inline static void __kgem_bo_clear_busy(struct kgem_bo *bo)
{
	bo->needs_flush = false;
	list_del(&bo->request);
	bo->rq = NULL;
	bo->domain = DOMAIN_NONE;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);
	return bo->rq;
}

/*

static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);

	if (bo->exec)
		return true;

	if (kgem_flush(kgem, bo->flush))
		kgem_submit(kgem);

	if (bo->rq && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	return kgem_bo_is_busy(bo);
}

*/

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return false;

	assert(bo->refcnt);
	return bo->dirty;
}

static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}

static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	do {
		assert(bo->exec);
		assert(bo->rq);

		if (bo->dirty)
			return;

		__kgem_bo_mark_dirty(bo);
	} while ((bo = bo->proxy));
}

#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
				   uint32_t size, uint32_t flags,
				   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret);
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

#if HAS_DEBUG_FULL
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif

#endif /* KGEM_H */