/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifndef KGEM_H
#define KGEM_H

#include <stdint.h>
#include <stdbool.h>
#include <stdarg.h>
#include <stdio.h>

#include <i915_drm.h>

#include "compiler.h"
#include "intel_list.h"

#include <limits.h>
#if !defined(MAXSHORT) || !defined(MINSHORT) || \
    !defined(MAXINT) || !defined(MININT)
/*
 * Some implementations #define these through <math.h>, so preclude
 * #include'ing it later.
 */

#include <math.h>
#undef MAXSHORT
#define MAXSHORT SHRT_MAX
#undef MINSHORT
#define MINSHORT SHRT_MIN
#undef MAXINT
#define MAXINT INT_MAX
#undef MININT
#define MININT INT_MIN

#endif
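
/*
 * kgem_bo wraps a single GEM buffer object: its kernel handle, its size
 * (in pages, or in bytes for proxy buffers), its tiling/pitch and cache
 * domain, and the cache/request lists it currently sits on.  The low two
 * bits of the rq pointer encode the ring the bo was last submitted on;
 * use RQ() and RQ_RING() to unpack it.
 */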
struct kgem_bo {
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
	struct drm_i915_gem_exec_object2 *exec;

	struct kgem_bo *proxy;

	struct list list;
	struct list request;
	struct list vma;

	void *map__cpu;
	void *map__gtt;
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint64_t presumed_offset;
	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t target_handle;
	uint32_t delta;
	union {
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
	uint32_t pitch : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t gpu_dirty : 1;
	uint32_t gtt_dirty : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;
	uint32_t io : 1;
	uint32_t flush : 1;
	uint32_t scanout : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3
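
/*
 * A kgem_request describes one submitted batch: the batch bo itself, the
 * list of buffers referenced by it, and the ring it was executed on.
 */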
struct kgem_request {
	struct list list;
	struct kgem_bo *bo;
	struct list buffers;
	int ring;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};
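
/*
 * struct kgem is the per-device context: the drm fd and GPU generation,
 * the feature flags probed at init time, the bo caches (bucketed by size),
 * the outstanding requests per ring, and the batch/reloc/exec arrays being
 * assembled for the next submission.
 */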
struct kgem {
	int fd;
	int wedged;
	unsigned gen;

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	struct list requests[2];
	struct kgem_request *next_request;
	struct kgem_request static_request;

	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;
	uint16_t min_alignment;

	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	uint32_t has_create2 :1;
	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_caching :1;
	uint32_t has_llc :1;
	uint32_t has_wt :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;

	uint32_t can_blt_cpu :1;
	uint32_t can_render_y :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced, aperture_max_fence;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

#if 0
	void (*memcpy_to_tiled_x)(const void *src, void *dst, int bpp,
				  int32_t src_stride, int32_t dst_stride,
				  int16_t src_x, int16_t src_y,
				  int16_t dst_x, int16_t dst_y,
				  uint16_t width, uint16_t height);
	void (*memcpy_from_tiled_x)(const void *src, void *dst, int bpp,
				    int32_t src_stride, int32_t dst_stride,
				    int16_t src_x, int16_t src_y,
				    int16_t dst_x, int16_t dst_y,
				    uint16_t width, uint16_t height);
#endif

	uint16_t reloc__self[256];
	uint32_t batch[64*1024-8] page_aligned;
	struct drm_i915_gem_exec_object2 exec[384] page_aligned;
	struct drm_i915_gem_relocation_entry reloc[8192] page_aligned;

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};

#define KGEM_MAX_DEFERRED_VBO 16

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED (KGEM_MAX_DEFERRED_VBO)
#define KGEM_EXEC_RESERVED (1+KGEM_MAX_DEFERRED_VBO)

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif
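
/*
 * Usable capacity of the batch/exec/reloc arrays once the reserved slots
 * are set aside; the kgem_check_*() helpers below test against these.
 */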
#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size);
int kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length);

void kgem_proxy_bo_attach(struct kgem_bo *bo, struct kgem_bo **ptr);

int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU     0x1
#define KGEM_CAN_CREATE_CPU     0x2
#define KGEM_CAN_CREATE_LARGE	0x4
#define KGEM_CAN_CREATE_GTT	0x8

uint32_t kgem_get_unique_id(struct kgem *kgem);

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);
enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
	CREATE_PRIME = 0x20,
	CREATE_TEMPORARY = 0x40,
	CREATE_CACHED = 0x80,
	CREATE_NO_RETIRE = 0x100,
	CREATE_NO_THROTTLE = 0x200,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags);

bool kgem_bo_convert_to_gpu(struct kgem *kgem, struct kgem_bo *bo);

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);

bool kgem_retire(struct kgem *kgem);

bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
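/*
 * kgem->requests[] holds two lists: index 1 for the BLT ring and index 0
 * for everything else, which is why the helpers below collapse the ring
 * argument to (ring == KGEM_BLT).
 */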
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
{
	ring = ring == KGEM_BLT;

	if (list_is_empty(&kgem->requests[ring]))
		return true;

	return __kgem_ring_is_idle(kgem, ring);
}

static inline bool kgem_is_idle(struct kgem *kgem)
{
	if (!kgem->need_retire)
		return true;

	return kgem_ring_is_idle(kgem, kgem->ring);
}

static inline bool __kgem_ring_empty(struct kgem *kgem)
{
	return list_is_empty(&kgem->requests[kgem->ring == KGEM_BLT]);
}

void _kgem_submit(struct kgem *kgem);
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch)
		_kgem_submit(kgem);
}

static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void kgem_scanout_flush(struct kgem *kgem, struct kgem_bo *bo);

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	bo->refcnt++;
	return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->nreloc && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: flushing before new bo\n", __FUNCTION__));
		_kgem_submit(kgem);
	}

	if (kgem->mode == mode)
		return;

	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
	assert(kgem->nbatch == 0);
	assert(!kgem->wedged);
	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	assert(num_dwords > 0);
	assert(kgem->nbatch < kgem->surface);
	assert(kgem->surface <= kgem->batch_size);
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces) &&
		kgem_check_exec(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
	if (kgem->nreloc) {
		unsigned mode = kgem->mode;
		_kgem_submit(kgem);
		_kgem_set_mode(kgem, mode);
	}

	return kgem->batch + kgem->nbatch;
}

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);
uint64_t kgem_add_reloc64(struct kgem *kgem,
			  uint32_t pos,
			  struct kgem_bo *bo,
			  uint32_t read_write_domains,
			  uint64_t delta);
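
/*
 * Illustrative sketch (not part of this header) of the usual emission
 * pattern with the helpers above: reserve space, write dwords directly into
 * kgem->batch and patch in buffer addresses via kgem_add_reloc().  Here
 * b, cmd and value are placeholders.
 *
 *	kgem_set_mode(kgem, KGEM_BLT, bo);
 *	if (!kgem_check_batch(kgem, 3) ||
 *	    !kgem_check_reloc_and_exec(kgem, 1)) {
 *		_kgem_submit(kgem);
 *		_kgem_set_mode(kgem, KGEM_BLT);
 *	}
 *
 *	b = kgem->batch + kgem->nbatch;
 *	b[0] = cmd;
 *	b[1] = kgem_add_reloc(kgem, kgem->nbatch + 1, bo,
 *			      I915_GEM_DOMAIN_RENDER << 16 |
 *			      I915_GEM_DOMAIN_RENDER,
 *			      0);
 *	b[2] = value;
 *	kgem->nbatch += 3;
 */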

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling, int pitch,
			int *tile_width, int *tile_height, int *tile_size);

static inline int __kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy != NULL);
	return bo->size.bytes;
}

static inline int __kgem_bo_size(struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int __kgem_bo_num_pages(struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	return bo->size.pages.count;
}

static inline int kgem_bo_size(struct kgem_bo *bo)
{
	if (bo->proxy)
		return __kgem_buffer_size(bo);
	else
		return __kgem_bo_size(bo);
}

static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 040 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}

static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	while (bo->proxy)
		bo = bo->proxy;
	return bo->snoop;
}

void kgem_bo_undo(struct kgem *kgem, struct kgem_bo *bo);

bool __kgem_busy(struct kgem *kgem, int handle);

static inline void kgem_bo_mark_busy(struct kgem_bo *bo, int ring)
{
	bo->rq = (struct kgem_request *)((uintptr_t)bo->rq | ring);
}

inline static void __kgem_bo_clear_busy(struct kgem_bo *bo)
{
	bo->rq = NULL;
	list_del(&bo->request);

	bo->domain = DOMAIN_NONE;
	bo->needs_flush = false;
	bo->gtt_dirty = false;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);
	return bo->rq;
}

static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);

	if (bo->exec)
		return true;

	if (bo->rq && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	return kgem_bo_is_busy(bo);
}

static inline bool kgem_bo_is_render(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, rq? %d [%d]\n", __FUNCTION__,
	     bo->handle, bo->rq != NULL, (int)RQ_RING(bo->rq)));
	assert(bo->refcnt);
	return bo->rq && RQ_RING(bo->rq) == I915_EXEC_RENDER;
}

static inline void kgem_bo_mark_unreusable(struct kgem_bo *bo)
{
	while (bo->proxy) {
		bo->flush = true;
		bo = bo->proxy;
	}
	bo->flush = true;
	bo->reusable = false;
}

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return false;

	assert(bo->refcnt);
	return bo->gpu_dirty;
}

static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}

static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->gpu_dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	do {
		assert(bo->exec);
		assert(bo->rq);

		if (bo->gpu_dirty)
			return;

		__kgem_bo_mark_dirty(bo);
	} while ((bo = bo->proxy));
}
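
/*
 * Mapping helpers: an untiled bo that is coherent with the CPU (LLC, or
 * already in the CPU domain) can be used through its CPU mapping; anything
 * else has to go through a GTT mapping, which additionally must fit into
 * the mappable aperture.
 */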
static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p:%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->map__gtt, bo->map__cpu, bo->tiling, bo->domain));

	if (bo->tiling == I915_TILING_NONE && (bo->domain == DOMAIN_CPU || kgem->has_llc))
		return bo->map__cpu != NULL;

	return bo->map__gtt != NULL;
}

static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p:%p, tiling=%d, domain=%d, offset=%ld\n",
	     __FUNCTION__, bo->map__gtt, bo->map__cpu, bo->tiling, bo->domain, (long)bo->presumed_offset));

	if (!bo->tiling && (kgem->has_llc || bo->domain == DOMAIN_CPU))
		return true;

	if (bo->map__gtt != NULL)
		return true;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	if (!bo->presumed_offset)
		return __kgem_bo_num_pages(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset / PAGE_SIZE + __kgem_bo_num_pages(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_can_map__cpu(struct kgem *kgem,
					struct kgem_bo *bo,
					bool write)
{
	if (bo->purged || (bo->scanout && write))
		return false;

	if (kgem->has_llc)
		return true;

	if (bo->domain != DOMAIN_CPU)
		return false;

	return !write || bo->exec == NULL;
}
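
/*
 * Flags for the upload-buffer helpers below.  KGEM_BUFFER_WRITE marks data
 * the CPU is about to write; adding KGEM_BUFFER_INPLACE (the common
 * KGEM_BUFFER_WRITE_INPLACE combination) is intended for writing straight
 * through a mapping rather than staging a copy.
 */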
#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
				   uint32_t size, uint32_t flags,
				   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret);
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
bool kgem_cleanup_cache(struct kgem *kgem);

void kgem_clean_scanout_cache(struct kgem *kgem);
void kgem_clean_large_cache(struct kgem *kgem);

#if HAS_DEBUG_FULL
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif

void choose_memcpy_tiled_x(struct kgem *kgem, int swizzling);

#endif /* KGEM_H */