/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifndef KGEM_H
#define KGEM_H

#include "compiler.h"
32
#include 
33
//#include 
34
 
35
#include 
36
 
37
 
38
#if DEBUG_KGEM
#define DBG_HDR(x) ErrorF x
#else
#define DBG_HDR(x)
#endif

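/* kgem_bo wraps a single GEM buffer object: its kernel handle, reference
 * count, cached CPU/GTT mapping, tiling/pitch/size metadata, and the cache,
 * request and vma lists it is currently linked on. */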
struct kgem_bo {
	struct kgem_bo *proxy;

    struct list_head list;
    struct list_head request;
    struct list_head vma;

    void     *map;
    uint32_t  gaddr;

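/* The low bit of the cached map pointer records how the bo was mapped:
 * set for a CPU mapping, clear for a GTT mapping. */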
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
	struct kgem_request *rq;
    struct drm_i915_gem_exec_object2 *exec;

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t presumed_offset;
	uint32_t delta;
	union {
		struct {
			uint32_t count:27;
            uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
    uint32_t pitch  : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
    uint32_t dirty  : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
    uint32_t vmap   : 1;
    uint32_t io     : 1;
    uint32_t flush  : 1;
	uint32_t scanout : 1;
    uint32_t sync   : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3

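/* A kgem_request tracks one batch submission: the batch bo itself and the
 * list of buffers referenced by that execbuffer. */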
struct kgem_request {
    struct list_head list;
	struct kgem_bo *bo;
    struct list_head buffers;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};

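/* Central state of the batch manager: the DRM fd, hardware generation and
 * capability flags, the current batch with its exec and relocation tables,
 * and the per-bucket caches of active and inactive buffer objects. */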
struct kgem {
	int fd;
	int wedged;
	int gen;

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

    struct list_head flushing;
    struct list_head large;
    struct list_head active[NUM_CACHE_BUCKETS][3];
    struct list_head inactive[NUM_CACHE_BUCKETS];
    struct list_head partial;
    struct list_head requests;
	struct kgem_request *next_request;

	struct {
        struct list_head inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nfence;
	uint16_t max_batch_size;

	uint32_t flush:1;
	uint32_t sync:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t scanout:1;
	uint32_t flush_now:1;
	uint32_t busy:1;

	uint32_t has_vmap :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_semaphores :1;
	uint32_t has_llc :1;
	uint32_t has_cpu_bo :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t min_alignment;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t partial_buffer_size;

//   void (*context_switch)(struct kgem *kgem, int new_mode);
    void (*retire)(struct kgem *kgem);

    uint32_t *batch;
    uint32_t *batch_ptr;
    int       batch_idx;
    struct drm_i915_gem_object *batch_obj;

    struct drm_i915_gem_exec_object2 exec[256];
    struct drm_i915_gem_relocation_entry reloc[384];
};

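/* A few batch, exec and reloc slots are kept in reserve so a batch can
 * always be closed out; the *_SIZE() macros give the usable capacity once
 * that reservation is subtracted. */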
#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED 4
#define KGEM_EXEC_RESERVED 1

#define KGEM_BATCH_SIZE(K) ((K)->max_batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)

void kgem_init(struct kgem *kgem, int gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size);
struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
				  int offset, int length);

//struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
//                    const void *data,
//                    BoxPtr box,
//                    int stride, int bpp);

int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU     0x1
#define KGEM_CAN_CREATE_CPU     0x2
#define KGEM_CAN_CREATE_LARGE	0x4

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);
enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);

bool kgem_retire(struct kgem *kgem);

void _kgem_submit(struct kgem *kgem);
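/* Flush the current batch to the kernel, but only when it actually
 * contains commands. */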
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch)
		_kgem_submit(kgem);
}

/*
static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_submit(kgem, bo);

	if (!bo->needs_flush)
		return;

	__kgem_flush(kgem, bo);

	bo->needs_flush = false;
}
*/
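/* Take an extra reference on a bo; returns the bo so calls can be chained. */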
static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	bo->refcnt++;
	return bo;
}

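/* Drop a reference; the bo is handed back to _kgem_bo_destroy() only when
 * the last reference goes away. */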
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}

void kgem_clear_dirty(struct kgem *kgem);

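/* Switch the batch to a new ring/mode; a no-op if that mode is already
 * selected. */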
static inline void kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->mode == mode)
		return;

//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}


static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
//   kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

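/* The kgem_check_* helpers report whether the current batch still has room
 * for the requested dwords, relocations or exec entries. */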
static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces);
}

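/* Reserve room for num_dwords in the batch, flushing the current batch
 * first if it cannot fit, and return a pointer to the insertion point. */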
static inline uint32_t *kgem_get_batch(struct kgem *kgem, int num_dwords)
{
	if (!kgem_check_batch(kgem, num_dwords))
		_kgem_submit(kgem);

	return kgem->batch + kgem->nbatch;
}

static inline void kgem_advance_batch(struct kgem *kgem, int num_dwords)
{
	kgem->nbatch += num_dwords;
}

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

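/* Add a bo to the current batch's exec list, resolving proxies to their
 * backing object and skipping buffers that are already listed. */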
void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->proxy)
		bo = bo->proxy;

	if (bo->exec == NULL)
		_kgem_add_bo(kgem, bo);
}

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size);

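/* Object sizes are stored either as a page count (ordinary bos) or as a
 * byte count (proxied I/O buffers); these helpers read the right field. */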
static inline int kgem_bo_size(struct kgem_bo *bo)
{
	assert(!(bo->proxy && bo->io));
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy && bo->io);
	return bo->size.bytes;
}

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 40 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/
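/* Rough test of whether a bo can be mapped through the mappable GTT
 * aperture: already in the GTT domain or GTT-mapped, suitably aligned for
 * fencing on pre-gen4 tiled buffers, and small enough to fit at (or below)
 * its presumed offset. */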
static inline bool kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	DBG_HDR(("%s: domain=%d, offset: %d size: %d\n",
		 __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));

	if (bo->domain == DOMAIN_GTT)
		return true;

	if (IS_GTT_MAP(bo->map))
		return true;

	if (kgem->gen < 40 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_mapped(struct kgem_bo *bo)
{
	DBG_HDR(("%s: map=%p, tiling=%d\n", __FUNCTION__, bo->map, bo->tiling));

	if (bo->map == NULL)
		return false;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}

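/* A bo is considered busy while it is still attached to an outstanding
 * request (rq != NULL), i.e. the GPU may still be using it. */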
static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
		 __FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->proxy == NULL);
	return bo->rq;
}
/*
static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s? handle=%d, domain=%d, offset=%x, size=%x\n",
	     __FUNCTION__, bo->handle,
	     bo->domain, bo->presumed_offset, bo->size));

	if (!kgem_bo_is_mappable(kgem, bo))
		return true;

	if (kgem->wedged)
		return false;

	if (kgem_bo_is_busy(bo))
		return true;

	if (bo->presumed_offset == 0)
		return !list_is_empty(&kgem->requests);

	return false;
}
*/

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return FALSE;

	return bo->dirty;
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG_HDR(("%s: handle=%d\n", __FUNCTION__, bo->handle));
	bo->dirty = true;
}

void kgem_sync(struct kgem *kgem);

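/* Transient upload buffers: kgem_create_buffer() and kgem_create_buffer_2d()
 * return a bo together with a CPU pointer in *ret; the KGEM_BUFFER_* flags
 * select write and in-place mapping behaviour. */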
#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
				   uint32_t size, uint32_t flags,
				   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

#if HAS_EXTRA_DEBUG
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif

#undef DBG_HDR

u32 get_buffer_offset(uint32_t handle);


#endif /* KGEM_H */