Subversion Repositories Kolibri OS

Rev 4245

/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifndef KGEM_H
#define KGEM_H

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdarg.h>

#include <i915_drm.h>

#include "compiler.h"
#include "intel_list.h"

#include <limits.h>
#if !defined(MAXSHORT) || !defined(MINSHORT) || \
    !defined(MAXINT) || !defined(MININT)
/*
 * Some implementations #define these through <math.h>, so preclude
 * #include'ing it later.
 */

#include <math.h>
#undef MAXSHORT
#define MAXSHORT SHRT_MAX
#undef MINSHORT
#define MINSHORT SHRT_MIN
#undef MAXINT
#define MAXINT INT_MAX
#undef MININT
#define MININT INT_MIN

#endif

struct kgem_bo {
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
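	/*
	 * The low two bits of bo->rq double as a tag for the ring the
	 * request was queued on: RQ() strips the tag to recover the
	 * kgem_request pointer, RQ_RING() extracts it, and
	 * kgem_bo_mark_busy() below is where the ring number is ORed in.
	 */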
	struct drm_i915_gem_exec_object2 *exec;

	struct kgem_bo *proxy;

	struct list list;
	struct list request;
	struct list vma;

	void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
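	/*
	 * bo->map is tagged the same way: bit 0 set marks a CPU mapping,
	 * clear marks a GTT mapping; MAP() masks the tag bits off before
	 * the pointer is used.
	 */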

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t target_handle;
	uint32_t presumed_offset;
	uint32_t delta;
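	/*
	 * For a normal bo the size below is kept as a page count plus the
	 * cache bucket it belongs to; a proxy (upload buffer) bo records
	 * its size in bytes instead, see __kgem_bo_size() and
	 * __kgem_buffer_size() near the end of this header.
	 */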
	union {
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
	uint32_t pitch : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t gpu_dirty : 1;
	uint32_t gtt_dirty : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;
	uint32_t io : 1;
	uint32_t flush : 1;
	uint32_t scanout : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3

struct kgem_request {
	struct list list;
	struct kgem_bo *bo;
	struct list buffers;
	int ring;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};

struct kgem {
	int fd;
	int wedged;
	unsigned gen;
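	/*
	 * gen encodes the hardware generation in octal-style form with the
	 * major generation in the leading digit (e.g. 021 for gen2.1, 040
	 * for gen4), matching the gen checks later in this header.
	 */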

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;
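
	/*
	 * Bo caches: the active and inactive lists are bucketed by object
	 * size (NUM_CACHE_BUCKETS); very large objects, scanouts and
	 * snooped bos are kept on their own lists.
	 */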
	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	struct list requests[2];
	struct kgem_request *next_request;
	struct kgem_request static_request;

	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;
	uint16_t min_alignment;

	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	uint32_t has_create2 :1;
	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_caching :1;
	uint32_t has_llc :1;
	uint32_t has_wt :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;

	uint32_t can_blt_cpu :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

#if 0
	void (*memcpy_to_tiled_x)(const void *src, void *dst, int bpp,
				  int32_t src_stride, int32_t dst_stride,
				  int16_t src_x, int16_t src_y,
				  int16_t dst_x, int16_t dst_y,
				  uint16_t width, uint16_t height);
	void (*memcpy_from_tiled_x)(const void *src, void *dst, int bpp,
				    int32_t src_stride, int32_t dst_stride,
				    int16_t src_x, int16_t src_y,
				    int16_t dst_x, int16_t dst_y,
				    uint16_t width, uint16_t height);
#endif

	uint16_t reloc__self[256];
	uint32_t batch[64*1024-8] page_aligned;
	struct drm_i915_gem_exec_object2 exec[384] page_aligned;
	struct drm_i915_gem_relocation_entry reloc[8192] page_aligned;
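	/*
	 * "page_aligned" is expected to be an alignment attribute supplied
	 * by compiler.h so that the batch, exec and reloc arrays above
	 * start on page boundaries.
	 */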

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};

#define KGEM_MAX_DEFERRED_VBO 16

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED (KGEM_MAX_DEFERRED_VBO)
#define KGEM_EXEC_RESERVED (1+KGEM_MAX_DEFERRED_VBO)

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif

#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
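
/*
 * The *_RESERVED values keep headroom in the batch, exec and reloc arrays:
 * one batch dword (presumably for the terminating MI_BATCH_BUFFER_END) plus
 * enough exec/reloc slots for up to KGEM_MAX_DEFERRED_VBO vertex buffers
 * that may still have to be emitted when the batch is flushed.
 */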

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size);
int kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length);

void kgem_proxy_bo_attach(struct kgem_bo *bo, struct kgem_bo **ptr);

int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU     0x1
#define KGEM_CAN_CREATE_CPU     0x2
#define KGEM_CAN_CREATE_LARGE	0x4
#define KGEM_CAN_CREATE_GTT	0x8

uint32_t kgem_get_unique_id(struct kgem *kgem);

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);
enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
	CREATE_PRIME = 0x20,
	CREATE_TEMPORARY = 0x40,
	CREATE_CACHED = 0x80,
	CREATE_NO_RETIRE = 0x100,
	CREATE_NO_THROTTLE = 0x200,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags);

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);

bool kgem_retire(struct kgem *kgem);

bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
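/*
 * Only two request queues are kept (struct kgem::requests[2]): index 1
 * holds work submitted to the BLT ring and index 0 everything else,
 * hence the "ring == KGEM_BLT" index used below and in
 * __kgem_ring_empty().
 */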
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
{
	ring = ring == KGEM_BLT;

	if (list_is_empty(&kgem->requests[ring]))
		return true;

	return __kgem_ring_is_idle(kgem, ring);
}

static inline bool kgem_is_idle(struct kgem *kgem)
{
	if (!kgem->need_retire)
		return true;

	return kgem_ring_is_idle(kgem, kgem->ring);
}

static inline bool __kgem_ring_empty(struct kgem *kgem)
{
	return list_is_empty(&kgem->requests[kgem->ring == KGEM_BLT]);
}

void _kgem_submit(struct kgem *kgem);
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch)
		_kgem_submit(kgem);
}

static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
	if (kgem->nreloc == 0)
		return false;

	return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}

static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void kgem_scanout_flush(struct kgem *kgem, struct kgem_bo *bo);

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	bo->refcnt++;
	return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->nreloc && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring))
		_kgem_submit(kgem);

	if (kgem->mode == mode)
		return;

//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
	assert(kgem->nbatch == 0);
	assert(!kgem->wedged);
//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	assert(num_dwords > 0);
	assert(kgem->nbatch < kgem->surface);
	assert(kgem->surface <= kgem->batch_size);
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces) &&
		kgem_check_exec(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
	if (kgem->nreloc) {
		unsigned mode = kgem->mode;
		_kgem_submit(kgem);
		_kgem_set_mode(kgem, mode);
	}

	return kgem->batch + kgem->nbatch;
}
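
/*
 * A typical emit sequence built on these helpers (illustrative sketch,
 * not lifted verbatim from the driver):
 *
 *	uint32_t *b;
 *
 *	kgem_set_mode(kgem, KGEM_BLT, dst_bo);
 *	if (!kgem_check_batch(kgem, 6) ||
 *	    !kgem_check_reloc_and_exec(kgem, 1)) {
 *		_kgem_submit(kgem);
 *		_kgem_set_mode(kgem, KGEM_BLT);
 *	}
 *
 *	b = kgem->batch + kgem->nbatch;
 *	b[0] = ...;                    command dwords
 *	b[1] = kgem_add_reloc(kgem, kgem->nbatch + 1, dst_bo,
 *			      read_write_domains, delta);
 *	kgem->nbatch += 6;
 */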

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

#define KGEM_RELOC_FENCED 0x8000
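/*
 * KGEM_RELOC_FENCED is ORed into the read_write_domains argument of
 * kgem_add_reloc() to request that the target bo be bound with a fence
 * register (used for tiled access on older generations).
 */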
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size);

static inline int __kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy != NULL);
	return bo->size.bytes;
}

static inline int __kgem_bo_size(struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_bo_size(struct kgem_bo *bo)
{
	if (bo->proxy)
		return __kgem_buffer_size(bo);
	else
		return __kgem_bo_size(bo);
}

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 040 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/

static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	if (bo->domain == DOMAIN_GTT)
		return true;

	if (kgem->gen < 040 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	if (kgem->has_llc && bo->tiling == I915_TILING_NONE)
		return true;

	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	DBG(("%s: domain=%d, offset: %d size: %d\n",
	     __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
	assert(bo->refcnt);
	return __kgem_bo_is_mappable(kgem, bo);
}

static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
	assert(bo->refcnt);

	if (bo->map == NULL)
		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
	if (kgem_bo_mapped(kgem, bo))
		return true;

	if (!bo->tiling && (kgem->has_llc || bo->domain == DOMAIN_CPU))
		return true;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
}

static inline bool kgem_bo_can_map__cpu(struct kgem *kgem,
					struct kgem_bo *bo,
					bool write)
{
	if (bo->purged || (bo->scanout && write))
		return false;

	if (kgem->has_llc)
		return true;

	if (bo->domain != DOMAIN_CPU)
		return false;

	return !write || bo->exec == NULL;
}

static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	while (bo->proxy)
		bo = bo->proxy;
	return bo->snoop;
}

void kgem_bo_undo(struct kgem *kgem, struct kgem_bo *bo);

bool __kgem_busy(struct kgem *kgem, int handle);

static inline void kgem_bo_mark_busy(struct kgem_bo *bo, int ring)
{
	bo->rq = (struct kgem_request *)((uintptr_t)bo->rq | ring);
}

inline static void __kgem_bo_clear_busy(struct kgem_bo *bo)
{
	bo->rq = NULL;
	list_del(&bo->request);

	bo->domain = DOMAIN_NONE;
	bo->needs_flush = false;
	bo->gtt_dirty = false;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);
	return bo->rq;
}

static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);

	if (bo->exec)
		return true;

	if (kgem_flush(kgem, bo->flush))
		kgem_submit(kgem);

	if (bo->rq && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	return kgem_bo_is_busy(bo);
}

static inline bool kgem_bo_is_render(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, rq? %d [%d]\n", __FUNCTION__,
	     bo->handle, bo->rq != NULL, (int)RQ_RING(bo->rq)));
	assert(bo->refcnt);
	return bo->rq && RQ_RING(bo->rq) == I915_EXEC_RENDER;
}

static inline void kgem_bo_mark_unreusable(struct kgem_bo *bo)
{
	while (bo->proxy) {
		bo->flush = true;
		bo = bo->proxy;
	}
	bo->flush = true;
	bo->reusable = false;
}

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return false;

	assert(bo->refcnt);
	return bo->gpu_dirty;
}

static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}

static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->gpu_dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	do {
		assert(bo->exec);
		assert(bo->rq);

		if (bo->gpu_dirty)
			return;

		__kgem_bo_mark_dirty(bo);
	} while ((bo = bo->proxy));
}

#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)
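
/*
 * Upload-buffer flags for kgem_create_buffer()/kgem_create_buffer_2d()
 * below: KGEM_BUFFER_WRITE marks a CPU-written buffer destined for GPU
 * reads, and KGEM_BUFFER_INPLACE asks for the data to be written straight
 * through a mapping rather than staged and copied at submit time; the two
 * combine into KGEM_BUFFER_WRITE_INPLACE for the common upload path. This
 * is a sketch of the intent; kgem.c holds the authoritative semantics.
 */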

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
				   uint32_t size, uint32_t flags,
				   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret);
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

void kgem_clean_scanout_cache(struct kgem *kgem);
void kgem_clean_large_cache(struct kgem *kgem);

#if HAS_DEBUG_FULL
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif

#endif /* KGEM_H */