/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifndef KGEM_H
#define KGEM_H

#include "compiler.h"
#include 
//#include 

#include 

#if DEBUG_KGEM
#define DBG_HDR(x) ErrorF x
#else
#define DBG_HDR(x)
#endif

struct kgem_bo {
	struct kgem_bo *proxy;

	struct list_head list;
	struct list_head request;
	struct list_head vma;

	void     *map;
	uint32_t  gaddr;

#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
	struct kgem_request *rq;
	struct drm_i915_gem_exec_object2 *exec;

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t presumed_offset;
	uint32_t delta;
	union {
		struct {
			uint32_t count:27;
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
		} pages;
		uint32_t bytes;
	} size;
	uint32_t pitch  : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t dirty  : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
	uint32_t vmap   : 1;
	uint32_t io     : 1;
	uint32_t flush  : 1;
	uint32_t scanout : 1;
	uint32_t sync   : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3
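
/*
 * struct kgem_bo wraps a GEM buffer object handle together with its cache
 * bookkeeping.  The size union holds either a page count (ordinary bos,
 * grouped into NUM_CACHE_BUCKETS buckets) or an exact byte size (proxy/io
 * upload buffers, see kgem_buffer_size() below); assuming 4 KiB pages,
 * MAX_CACHE_SIZE works out to 1 << (16 + 12) = 256 MiB.  The map pointer is
 * tagged in its low bit: IS_CPU_MAP() is true when the bit is set,
 * IS_GTT_MAP() when it is clear and the pointer is non-NULL.  The domain
 * field records one of the DOMAIN_* values above, i.e. where the object was
 * last known to be accessed.
 */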

struct kgem_request {
	struct list_head list;
	struct kgem_bo *bo;
	struct list_head buffers;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};

struct kgem {
	int fd;
	int wedged;
	int gen;

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	struct list_head flushing;
	struct list_head large;
	struct list_head active[NUM_CACHE_BUCKETS][3];
	struct list_head inactive[NUM_CACHE_BUCKETS];
	struct list_head partial;
	struct list_head requests;
	struct kgem_request *next_request;

	struct {
		struct list_head inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nfence;
	uint16_t max_batch_size;

	uint32_t flush:1;
	uint32_t sync:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t scanout:1;
	uint32_t flush_now:1;
	uint32_t busy:1;

	uint32_t has_vmap :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_semaphores :1;
	uint32_t has_llc :1;
	uint32_t has_cpu_bo :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t min_alignment;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t partial_buffer_size;

//	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);

	uint32_t *batch;
	uint32_t *batch_ptr;
	int       batch_idx;
	struct drm_i915_gem_object *batch_obj;

	struct drm_i915_gem_exec_object2 exec[256];
	struct drm_i915_gem_relocation_entry reloc[384];
};

typedef struct
{
	struct drm_i915_gem_object *batch;
	struct list_head  objects;
	u32    exec_start;
	u32    exec_len;

} batchbuffer_t;

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED 4
#define KGEM_EXEC_RESERVED 1

#define KGEM_BATCH_SIZE(K) ((K)->max_batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
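
/*
 * The reserved slots keep a batch from being filled to the brim: with the
 * values above, KGEM_EXEC_SIZE() is 256 - 1 = 255 exec objects,
 * KGEM_RELOC_SIZE() is 384 - 4 = 380 relocations, and KGEM_BATCH_SIZE() is
 * max_batch_size - 1 dwords, presumably leaving room for the terminating
 * MI_BATCH_BUFFER_END.
 */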

void kgem_init(struct kgem *kgem, int gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size);
struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
				  int offset, int length);

//struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
//					   const void *data,
//					   BoxPtr box,
//					   int stride, int bpp);

int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU     0x1
#define KGEM_CAN_CREATE_CPU     0x2
#define KGEM_CAN_CREATE_LARGE	0x4

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);

enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);
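
/*
 * A typical allocation of a GPU surface first asks kgem_can_create_2d()
 * (which reports the KGEM_CAN_CREATE_* bits above), picks a tiling mode and
 * then creates the bo.  A minimal sketch, with flags chosen only for
 * illustration:
 *
 *	int tiling = kgem_choose_tiling(kgem, I915_TILING_X,
 *					width, height, bpp);
 *	struct kgem_bo *bo = kgem_create_2d(kgem, width, height, bpp,
 *					    tiling, CREATE_INACTIVE);
 *	if (bo == NULL)
 *		return NULL;	// fall back to a CPU surface
 */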

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);

bool kgem_retire(struct kgem *kgem);

void _kgem_submit(struct kgem *kgem, batchbuffer_t *exb);
//static inline void kgem_submit(struct kgem *kgem)
//{
//	if (kgem->nbatch)
//		_kgem_submit(kgem);
//}

/*
static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_submit(kgem, bo);

	if (!bo->needs_flush)
		return;

	__kgem_flush(kgem, bo);

	bo->needs_flush = false;
}
*/

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	bo->refcnt++;
	return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}
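
/*
 * Buffer objects are reference counted: kgem_bo_reference() takes an extra
 * reference, kgem_bo_destroy() drops one and only hands the bo to
 * _kgem_bo_destroy() once the count reaches zero.  For example, to keep a
 * bo alive across some unrelated work:
 *
 *	bo = kgem_bo_reference(bo);
 *	... other work that may drop the caller's reference ...
 *	kgem_bo_destroy(kgem, bo);
 */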

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->mode == mode)
		return;

//	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
//	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem, int num_dwords)
{
//	if (!kgem_check_batch(kgem, num_dwords))
//		_kgem_submit(kgem);

	return kgem->batch + kgem->nbatch;
}

static inline void kgem_advance_batch(struct kgem *kgem, int num_dwords)
{
	kgem->nbatch += num_dwords;
}

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->proxy)
		bo = bo->proxy;

	if (bo->exec == NULL)
		_kgem_add_bo(kgem, bo);
}

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);
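
/*
 * Commands are written straight into kgem->batch.  A caller checks that
 * enough batch dwords and relocation slots remain (submitting the current
 * batch if not), emits its dwords and records a relocation for every buffer
 * address.  A rough sketch (cmd, dst_pitch, dst_bo and exb are placeholders;
 * the domain values come from i915_drm.h):
 *
 *	if (!kgem_check_batch(kgem, 4) || !kgem_check_reloc(kgem, 1))
 *		_kgem_submit(kgem, exb);
 *
 *	uint32_t *b = kgem_get_batch(kgem, 4);
 *	b[0] = cmd;
 *	b[1] = dst_pitch;
 *	b[2] = kgem_add_reloc(kgem, kgem->nbatch + 2, dst_bo,
 *			      I915_GEM_DOMAIN_RENDER << 16 |
 *			      I915_GEM_DOMAIN_RENDER,
 *			      0);
 *	b[3] = value;
 *	kgem_advance_batch(kgem, 4);
 *
 * kgem_add_reloc() appends an entry to kgem->reloc (adding the bo to the
 * exec list if needed) and returns the address to place in the batch, which
 * the kernel fixes up at execbuffer time.
 */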

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);
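
/*
 * kgem_bo_map() maps a bo for direct access (normally through the GTT),
 * while kgem_bo_map__cpu() returns a plain CPU mapping that is typically
 * paired with kgem_bo_sync__cpu() to serialise against the GPU.  For small
 * one-off uploads kgem_bo_write() copies user data into the bo without
 * keeping a mapping around:
 *
 *	if (!kgem_bo_write(kgem, bo, data, length))
 *		... fall back ...
 *
 * kgem_bo_flink() returns a global name for sharing the bo between clients.
 */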

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size);

static inline int kgem_bo_size(struct kgem_bo *bo)
{
	assert(!(bo->proxy && bo->io));
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy && bo->io);
	return bo->size.bytes;
}

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 40 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/

static inline bool kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	DBG_HDR(("%s: domain=%d, offset: %d size: %d\n",
		 __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));

	if (bo->domain == DOMAIN_GTT)
		return true;

	if (IS_GTT_MAP(bo->map))
		return true;

	if (kgem->gen < 40 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}
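
/*
 * Note that the test above is conservative: an object already in the GTT
 * domain or with a live GTT mapping is mappable by definition; otherwise it
 * falls back to aperture arithmetic, and an object whose offset is not yet
 * known is only considered mappable if it would occupy at most a quarter of
 * the mappable aperture.  On pre-gen4 hardware a tiled object must also sit
 * at an offset aligned to its fence size.
 */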

static inline bool kgem_bo_mapped(struct kgem_bo *bo)
{
	DBG_HDR(("%s: map=%p, tiling=%d\n", __FUNCTION__, bo->map, bo->tiling));

	if (bo->map == NULL)
		return false;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
		 __FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->proxy == NULL);
	return bo->rq;
}

/*
static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s? handle=%d, domain=%d, offset=%x, size=%x\n",
	     __FUNCTION__, bo->handle,
	     bo->domain, bo->presumed_offset, bo->size));

	if (!kgem_bo_is_mappable(kgem, bo))
		return true;

	if (kgem->wedged)
		return false;

	if (kgem_bo_is_busy(bo))
		return true;

	if (bo->presumed_offset == 0)
		return !list_is_empty(&kgem->requests);

	return false;
}
*/

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return FALSE;

	return bo->dirty;
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG_HDR(("%s: handle=%d\n", __FUNCTION__, bo->handle));
	bo->dirty = true;
}

void kgem_sync(struct kgem *kgem);

#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
				   uint32_t size, uint32_t flags,
				   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
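
/*
 * kgem_create_buffer() and kgem_create_buffer_2d() return a bo together
 * with a CPU pointer (*ret) into its backing storage, intended for
 * streaming uploads and readback.  A write-only upload might look like
 * this (sketch, error handling omitted):
 *
 *	void *ptr;
 *	struct kgem_bo *bo = kgem_create_buffer(kgem, size,
 *						KGEM_BUFFER_WRITE, &ptr);
 *	memcpy(ptr, data, size);
 *	... emit GPU commands reading from bo ...
 *	kgem_bo_destroy(kgem, bo);
 *
 * For readback, kgem_buffer_read_sync() waits for the GPU to finish writing
 * before the CPU inspects *ret.
 */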

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

#if HAS_EXTRA_DEBUG
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif

#undef DBG_HDR

u32 get_buffer_offset(uint32_t handle);

#endif /* KGEM_H */