/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "sna.h"
#include "sna_reg.h"

unsigned int cpu_cache_size();

static struct kgem_bo *
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

static struct kgem_bo *
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

#define DBG_NO_HW 0
#define DBG_NO_TILING 1
#define DBG_NO_CACHE 0
#define DBG_NO_CACHE_LEVEL 0
#define DBG_NO_CPU 0
#define DBG_NO_USERPTR 0
#define DBG_NO_LLC 0
#define DBG_NO_SEMAPHORES 0
#define DBG_NO_MADV 1
#define DBG_NO_UPLOAD_CACHE 0
#define DBG_NO_UPLOAD_ACTIVE 0
#define DBG_NO_MAP_UPLOAD 0
#define DBG_NO_RELAXED_FENCING 0
#define DBG_NO_SECURE_BATCHES 0
#define DBG_NO_PINNED_BATCHES 0
#define DBG_NO_FAST_RELOC 0
#define DBG_NO_HANDLE_LUT 0
#define DBG_DUMP 0
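
/*
 * Compile-time debug switches: a non-zero value disables the named
 * feature or forces the fallback path. In this port tiling
 * (DBG_NO_TILING) and madvise-based purging (DBG_NO_MADV) are switched
 * off, so gem_set_tiling() bails out early and the kgem_bo_*purgeable()
 * helpers below become no-ops.
 */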

#ifndef DEBUG_SYNC
#define DEBUG_SYNC 0
#endif

#define SHOW_BATCH 0

#if 0
#define ASSERT_IDLE(kgem__, handle__) assert(!__kgem_busy(kgem__, handle__))
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__) assert(!(expect__) || !__kgem_busy(kgem__, handle__))
#else
#define ASSERT_IDLE(kgem__, handle__)
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__)
#endif

/* Worst case seems to be 965gm where we cannot write within a cacheline that
 * is simultaneously being read by the GPU, or within the sampler
 * prefetch. In general, the chipsets seem to have a requirement that sampler
 * offsets be aligned to a cacheline (64 bytes).
 */
#define UPLOAD_ALIGNMENT 128

#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)

#define MAX_GTT_VMA_CACHE 512
#define MAX_CPU_VMA_CACHE INT16_MAX
#define MAP_PRESERVE_TIME 10

#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
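
/*
 * Map pointers are tagged in their two low bits (mmap results are at
 * least page aligned, so those bits are free): bit 0 marks a CPU map,
 * bits 0|1 together a userptr map, and an untagged value is taken to
 * be a plain GTT map. MAP() strips the tag to recover the usable
 * address, e.g. memcpy(MAP(bo->map), data, length).
 */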

#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))

#define LOCAL_I915_PARAM_HAS_BLT		        11
#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING	12
#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA	    15
#define LOCAL_I915_PARAM_HAS_SEMAPHORES		    20
#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES	    23
#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES	    24
#define LOCAL_I915_PARAM_HAS_NO_RELOC		    25
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT		    26

#define LOCAL_I915_EXEC_IS_PINNED		(1<<10)
#define LOCAL_I915_EXEC_NO_RELOC		(1<<11)
#define LOCAL_I915_EXEC_HANDLE_LUT		(1<<12)

#define UNCACHED	0
#define SNOOPED		1

struct local_i915_gem_cacheing {
	uint32_t handle;
	uint32_t cacheing;
};
static struct kgem_bo *__kgem_freed_bo;
static struct kgem_request *__kgem_freed_request;

#define bucket(B) (B)->size.pages.bucket
#define num_pages(B) (B)->size.pages.count

#ifdef DEBUG_MEMORY
static void debug_alloc(struct kgem *kgem, size_t size)
{
	kgem->debug_memory.bo_allocs++;
	kgem->debug_memory.bo_bytes += size;
}
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
{
	debug_alloc(kgem, bytes(bo));
}
#else
#define debug_alloc(k, b)
#define debug_alloc__bo(k, b)
#endif

static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
{
	struct drm_i915_gem_set_tiling set_tiling;
	int ret = -1;	/* the set-tiling ioctl below is disabled in this port */

	if (DBG_NO_TILING)
		return false;
/*
	VG_CLEAR(set_tiling);
	do {
		set_tiling.handle = handle;
		set_tiling.tiling_mode = tiling;
		set_tiling.stride = stride;

		ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
*/
	return ret == 0;
}

static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
{
	struct local_i915_gem_cacheing arg;
	ioctl_t  io;

	VG_CLEAR(arg);
	arg.handle = handle;
	arg.cacheing = cacheing;

	io.handle   = fd;
	io.io_code  = SRV_I915_GEM_SET_CACHEING;
	io.input    = &arg;
	io.inp_size = sizeof(arg);
	io.output   = NULL;
	io.out_size = 0;

	return call_service(&io) == 0;
}
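
/*
 * gem_set_cacheing() drives the UNCACHED/SNOOPED cache-level probe in
 * test_has_cacheing() below: create a scratch bo, try to set it
 * UNCACHED, close it. SRV_I915_GEM_SET_CACHEING is the KolibriOS
 * service call, presumably standing in for the DRM set-caching ioctl.
 */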

static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
{
	if (flags & CREATE_NO_RETIRE) {
		DBG(("%s: not retiring per-request\n", __FUNCTION__));
		return false;
	}

	if (!kgem->need_retire) {
		DBG(("%s: nothing to retire\n", __FUNCTION__));
		return false;
	}

//	if (kgem_retire(kgem))
//		return true;

	if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
		DBG(("%s: not throttling\n", __FUNCTION__));
		return false;
	}

//	kgem_throttle(kgem);
//	return kgem_retire(kgem);
	return false;
}

static int gem_write(int fd, uint32_t handle,
		     int offset, int length,
		     const void *src)
{
	struct drm_i915_gem_pwrite pwrite;

	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
	     handle, offset, length));

	VG_CLEAR(pwrite);
	pwrite.handle = handle;
	/* align the transfer to cachelines; fortuitously this is safe! */
	if ((offset | length) & 63) {
		pwrite.offset = offset & ~63;
		pwrite.size = ALIGN(offset+length, 64) - pwrite.offset;
		pwrite.data_ptr = (uintptr_t)src + pwrite.offset - offset;
	} else {
		pwrite.offset = offset;
		pwrite.size = length;
		pwrite.data_ptr = (uintptr_t)src;
	}
//	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
	return -1;
}
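
/*
 * Worked example of the cacheline rounding above: offset=100, length=10
 * gives pwrite.offset = 100 & ~63 = 64, pwrite.size = ALIGN(110, 64) - 64
 * = 64 and data_ptr = src - 36, so both ends of the transfer land on
 * 64-byte boundaries ("fortuitously safe", presumably because the source
 * sits inside a larger readable allocation). Note the pwrite call itself
 * is stubbed out in this port, so gem_write() currently always fails
 * with -1.
 */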

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length)
{
	assert(bo->refcnt);
	assert(!bo->purged);
	assert(bo->proxy == NULL);
	ASSERT_IDLE(kgem, bo->handle);

	assert(length <= bytes(bo));
	if (gem_write(kgem->fd, bo->handle, 0, length, data))
		return false;

	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
	if (bo->exec == NULL) {
//		kgem_bo_retire(kgem, bo);
		bo->domain = DOMAIN_NONE;
	}
	return true;
}

static uint32_t gem_create(int fd, int num_pages)
{
	struct drm_i915_gem_create create;
	ioctl_t  io;

	VG_CLEAR(create);
	create.handle = 0;
	create.size = PAGE_SIZE * num_pages;

	io.handle   = fd;
	io.io_code  = SRV_I915_GEM_CREATE;
	io.input    = &create;
	io.inp_size = sizeof(create);
	io.output   = NULL;
	io.out_size = 0;

	if (call_service(&io) != 0)
		return 0;

	return create.handle;
}

static bool
kgem_bo_set_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	assert(bo->exec == NULL);
	assert(!bo->purged);

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_DONTNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
		bo->purged = 1;
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
		return madv.retained;
	}

	return true;
#endif
}

static bool
kgem_bo_is_retained(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	if (!bo->purged)
		return true;

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_DONTNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0)
		return madv.retained;

	return false;
#endif
}

static bool
kgem_bo_clear_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	assert(bo->purged);

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_WILLNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
		bo->purged = !madv.retained;
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
		return madv.retained;
	}

	return false;
#endif
}
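
/*
 * The three helpers above implement the purgeable-bo protocol: a cached
 * bo is marked I915_MADV_DONTNEED so the kernel may reclaim its pages
 * under memory pressure, and is flipped back to I915_MADV_WILLNEED
 * before reuse; madv.retained reports whether the pages survived. With
 * DBG_NO_MADV set in this port they all short-circuit to "retained".
 */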

static void gem_close(int fd, uint32_t handle)
{
	struct drm_gem_close close;
	ioctl_t  io;

	VG_CLEAR(close);
	close.handle = handle;

	io.handle   = fd;
	io.io_code  = SRV_DRM_GEM_CLOSE;
	io.input    = &close;
	io.inp_size = sizeof(close);
	io.output   = NULL;
	io.out_size = 0;

	call_service(&io);
}

constant inline static unsigned long __fls(unsigned long word)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
#else
	unsigned int v = 0;

	while (word >>= 1)
		v++;

	return v;
#endif
}

constant inline static int cache_bucket(int num_pages)
{
	return __fls(num_pages);
}
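
/*
 * __fls() returns the index of the highest set bit, i.e. floor(log2()),
 * so cache_bucket() groups bos by power-of-two page count: 1 page ->
 * bucket 0, 2-3 pages -> 1, 4-7 -> 2, 8-15 -> 3, and so on. (The
 * "constant" qualifier is a driver-provided attribute macro, not a C
 * keyword.)
 */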

static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
				      int handle, int num_pages)
{
	assert(num_pages);
	memset(bo, 0, sizeof(*bo));

	bo->refcnt = 1;
	bo->handle = handle;
	bo->target_handle = -1;
	num_pages(bo) = num_pages;
	bucket(bo) = cache_bucket(num_pages);
	bo->reusable = true;
	bo->domain = DOMAIN_CPU;
	list_init(&bo->request);
	list_init(&bo->list);
	list_init(&bo->vma);

	return bo;
}

static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
{
	struct kgem_bo *bo;

	if (__kgem_freed_bo) {
		bo = __kgem_freed_bo;
		__kgem_freed_bo = *(struct kgem_bo **)bo;
	} else {
		bo = malloc(sizeof(*bo));
		if (bo == NULL)
			return NULL;
	}

	return __kgem_bo_init(bo, handle, num_pages);
}
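
/*
 * __kgem_freed_bo / __kgem_freed_request form intrusive free lists: a
 * released structure is recycled by storing the previous list head in
 * its first pointer-sized word (hence the *(struct kgem_bo **)bo cast),
 * avoiding a malloc/free round trip on the hot allocation path.
 */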

static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
{
	struct kgem_request *rq;

	rq = __kgem_freed_request;
	if (rq) {
		__kgem_freed_request = *(struct kgem_request **)rq;
	} else {
		rq = malloc(sizeof(*rq));
		if (rq == NULL)
			rq = &kgem->static_request;
	}

	list_init(&rq->buffers);
	rq->bo = NULL;
	rq->ring = 0;

	return rq;
}

static void __kgem_request_free(struct kgem_request *rq)
{
	_list_del(&rq->list);
	*(struct kgem_request **)rq = __kgem_freed_request;
	__kgem_freed_request = rq;
}

static struct list *inactive(struct kgem *kgem, int num_pages)
{
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
	return &kgem->inactive[cache_bucket(num_pages)];
}

static struct list *active(struct kgem *kgem, int num_pages, int tiling)
{
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
	return &kgem->active[cache_bucket(num_pages)][tiling];
}

static size_t
agp_aperture_size(struct pci_device *dev, unsigned gen)
{
	/* XXX assume that only future chipsets are unknown and follow
	 * the post gen2 PCI layout.
	 */
//	return dev->regions[gen < 030 ? 0 : 2].size;

	return 0;
}

static size_t
total_ram_size(void)
{
    uint32_t  data[9];
    size_t    size = 0;

    asm volatile("int $0x40"
        : "=a" (size)
        : "a" (18), "b" (20), "c" (data)
        : "memory");

    return size != -1 ? size : 0;
}
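
/*
 * total_ram_size() asks the KolibriOS kernel directly: "int $0x40" with
 * eax=18, ebx=20 appears to be the memory-information service, with the
 * data[] buffer receiving the detail block and eax the size (-1 meaning
 * failure, mapped to 0 here). agp_aperture_size() is stubbed out, so
 * kgem_init() below falls back to the aperture size reported by the
 * GET_APERTURE service call.
 */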

static int gem_param(struct kgem *kgem, int name)
{
    ioctl_t  io;

    drm_i915_getparam_t gp;
    int v = -1; /* No param uses the sign bit, reserve it for errors */

    VG_CLEAR(gp);
    gp.param = name;
    gp.value = &v;

    io.handle   = kgem->fd;
    io.io_code  = SRV_GET_PARAM;
    io.input    = &gp;
    io.inp_size = sizeof(gp);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
        return -1;

    VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
    return v;
}
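
/*
 * gem_param() returns the queried parameter value, or -1 on error (safe
 * because no i915 parameter uses the sign bit). Callers therefore test
 * for "> 0", for example:
 *
 *     kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
 */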

static bool test_has_execbuffer2(struct kgem *kgem)
{
	return true;
}

static bool test_has_no_reloc(struct kgem *kgem)
{
	if (DBG_NO_FAST_RELOC)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
}

static bool test_has_handle_lut(struct kgem *kgem)
{
	if (DBG_NO_HANDLE_LUT)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
}

static bool test_has_semaphores_enabled(struct kgem *kgem)
{
	bool detected = false;
	int ret;

	if (DBG_NO_SEMAPHORES)
		return false;

	ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
	if (ret != -1)
		return ret > 0;

	return detected;
}

static bool __kgem_throttle(struct kgem *kgem)
{
//	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
		return false;

//	return errno == EIO;
}

static bool is_hw_supported(struct kgem *kgem,
			    struct pci_device *dev)
{
	if (DBG_NO_HW)
		return false;

	if (!test_has_execbuffer2(kgem))
		return false;

	if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
		return kgem->has_blt;

	/* Although pre-855gm the GMCH is fubar, it works mostly. So
	 * let the user decide through "NoAccel" whether or not to risk
	 * hw acceleration.
	 */

	if (kgem->gen == 060 && dev->revision < 8) {
		/* pre-production SNB with dysfunctional BLT */
		return false;
	}

	if (kgem->gen >= 060) /* Only if the kernel supports the BLT ring */
		return kgem->has_blt;

	return true;
}

static bool test_has_relaxed_fencing(struct kgem *kgem)
{
	if (kgem->gen < 040) {
		if (DBG_NO_RELAXED_FENCING)
			return false;

		return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0;
	} else
		return true;
}

static bool test_has_llc(struct kgem *kgem)
{
	int has_llc = -1;

	if (DBG_NO_LLC)
		return false;

#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
	has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
#endif
	if (has_llc == -1) {
		DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
		has_llc = kgem->gen >= 060;
	}

	return has_llc;
}

static bool test_has_cacheing(struct kgem *kgem)
{
	uint32_t handle;
	bool ret;

	if (DBG_NO_CACHE_LEVEL)
		return false;

	/* Incoherent blt and sampler hangs the GPU */
	if (kgem->gen == 040)
		return false;

	handle = gem_create(kgem->fd, 1);
	if (handle == 0)
		return false;

	ret = gem_set_cacheing(kgem->fd, handle, UNCACHED);
	gem_close(kgem->fd, handle);
	return ret;
}

static bool test_has_userptr(struct kgem *kgem)
{
#if defined(USE_USERPTR)
	uint32_t handle;
	void *ptr;

	if (DBG_NO_USERPTR)
		return false;

	/* Incoherent blt and sampler hangs the GPU */
	if (kgem->gen == 040)
		return false;

	ptr = malloc(PAGE_SIZE);
	handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
	gem_close(kgem->fd, handle);
	free(ptr);

	return handle != 0;
#else
	return false;
#endif
}

static bool test_has_secure_batches(struct kgem *kgem)
{
	if (DBG_NO_SECURE_BATCHES)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
}

static bool test_has_pinned_batches(struct kgem *kgem)
{
	if (DBG_NO_PINNED_BATCHES)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
}

static bool kgem_init_pinned_batches(struct kgem *kgem)
{
	ioctl_t  io;

	int count[2] = { 4, 2 };
	int size[2] = { 1, 4 };
	int n, i;

	if (kgem->wedged)
		return true;

	for (n = 0; n < ARRAY_SIZE(count); n++) {
		for (i = 0; i < count[n]; i++) {
			struct drm_i915_gem_pin pin;
			struct kgem_bo *bo;

			VG_CLEAR(pin);

			pin.handle = gem_create(kgem->fd, size[n]);
			if (pin.handle == 0)
				goto err;

			DBG(("%s: new handle=%d, num_pages=%d\n",
			     __FUNCTION__, pin.handle, size[n]));

			bo = __kgem_bo_alloc(pin.handle, size[n]);
			if (bo == NULL) {
				gem_close(kgem->fd, pin.handle);
				goto err;
			}

			pin.alignment = 0;

			io.handle   = kgem->fd;
			io.io_code  = SRV_I915_GEM_PIN;
			io.input    = &pin;
			io.inp_size = sizeof(pin);
			io.output   = NULL;
			io.out_size = 0;

			if (call_service(&io) != 0) {
				gem_close(kgem->fd, pin.handle);
				goto err;
			}
			bo->presumed_offset = pin.offset;
			debug_alloc__bo(kgem, bo);
			list_add(&bo->list, &kgem->pinned_batches[n]);
		}
	}

	return true;

err:
	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
		while (!list_is_empty(&kgem->pinned_batches[n])) {
			kgem_bo_destroy(kgem,
					list_first_entry(&kgem->pinned_batches[n],
							 struct kgem_bo, list));
		}
	}

	/* For simplicity populate the lists with a single unpinned bo */
	for (n = 0; n < ARRAY_SIZE(count); n++) {
		struct kgem_bo *bo;
		uint32_t handle;

		handle = gem_create(kgem->fd, size[n]);
		if (handle == 0)
			break;

		bo = __kgem_bo_alloc(handle, size[n]);
		if (bo == NULL) {
			gem_close(kgem->fd, handle);
			break;
		}

		debug_alloc__bo(kgem, bo);
		list_add(&bo->list, &kgem->pinned_batches[n]);
	}
	return false;
}
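
/*
 * kgem_init_pinned_batches() pre-allocates two pools of batch buffers,
 * four 1-page and two 4-page bos, and pins them via SRV_I915_GEM_PIN so
 * their GTT offsets (pin.offset) are known up front. If pinning fails,
 * the pools are repopulated with a single unpinned bo each and false is
 * returned so the caller can decide whether to disable acceleration.
 */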

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{
    struct drm_i915_gem_get_aperture aperture;
    size_t totalram;
    unsigned half_gpu_max;
    unsigned int i, j;
    ioctl_t   io;

    DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));

    memset(kgem, 0, sizeof(*kgem));

    kgem->fd = fd;
    kgem->gen = gen;

    list_init(&kgem->requests[0]);
    list_init(&kgem->requests[1]);
    list_init(&kgem->batch_buffers);
    list_init(&kgem->active_buffers);
    list_init(&kgem->flushing);
    list_init(&kgem->large);
    list_init(&kgem->large_inactive);
    list_init(&kgem->snoop);
    list_init(&kgem->scanout);
    for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++)
        list_init(&kgem->pinned_batches[i]);
    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
        list_init(&kgem->inactive[i]);
    for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
        for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++)
            list_init(&kgem->active[i][j]);
    }
    for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) {
        for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
            list_init(&kgem->vma[i].inactive[j]);
    }
    kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
    kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;

    kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
    DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
         kgem->has_blt));

    kgem->has_relaxed_delta =
        gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
    DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
         kgem->has_relaxed_delta));

    kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
    DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
         kgem->has_relaxed_fencing));

    kgem->has_llc = test_has_llc(kgem);
    DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
         kgem->has_llc));

    kgem->has_cacheing = test_has_cacheing(kgem);
    DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
         kgem->has_cacheing));

    kgem->has_userptr = test_has_userptr(kgem);
    DBG(("%s: has userptr? %d\n", __FUNCTION__,
         kgem->has_userptr));

    kgem->has_no_reloc = test_has_no_reloc(kgem);
    DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
         kgem->has_no_reloc));

    kgem->has_handle_lut = test_has_handle_lut(kgem);
    DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
         kgem->has_handle_lut));

    kgem->has_semaphores = false;
    if (kgem->has_blt && test_has_semaphores_enabled(kgem))
        kgem->has_semaphores = true;
    DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
         kgem->has_semaphores));

    kgem->can_blt_cpu = gen >= 030;
    DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
         kgem->can_blt_cpu));

    kgem->has_secure_batches = test_has_secure_batches(kgem);
    DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
         kgem->has_secure_batches));

    kgem->has_pinned_batches = test_has_pinned_batches(kgem);
    DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
         kgem->has_pinned_batches));

    if (!is_hw_supported(kgem, dev)) {
        printf("Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
        kgem->wedged = 1;
    } else if (__kgem_throttle(kgem)) {
        printf("Detected a hung GPU, disabling acceleration.\n");
        kgem->wedged = 1;
    }

    kgem->batch_size = ARRAY_SIZE(kgem->batch);
    if (gen == 020 && !kgem->has_pinned_batches)
        /* Limited to what we can pin */
        kgem->batch_size = 4*1024;
    if (gen == 022)
        /* 865g cannot handle a batch spanning multiple pages */
        kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
    if ((gen >> 3) == 7)
        kgem->batch_size = 16*1024;
    if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
        kgem->batch_size = 4*1024;

    if (!kgem_init_pinned_batches(kgem) && gen == 020) {
        printf("Unable to reserve memory for GPU, disabling acceleration.\n");
        kgem->wedged = 1;
    }

    DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
         kgem->batch_size));

    kgem->min_alignment = 4;
    if (gen < 040)
        kgem->min_alignment = 64;

    kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
    DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
         kgem->half_cpu_cache_pages));

    kgem->next_request = __kgem_request_alloc(kgem);

    DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
         !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
         kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));

    VG_CLEAR(aperture);
    aperture.aper_size = 0;

    io.handle   = fd;
    io.io_code  = SRV_I915_GEM_GET_APERTURE;
    io.input    = &aperture;
    io.inp_size = sizeof(aperture);
    io.output   = NULL;
    io.out_size = 0;

    (void)call_service(&io);

    if (aperture.aper_size == 0)
        aperture.aper_size = 64*1024*1024;

    DBG(("%s: aperture size %lld, available now %lld\n",
         __FUNCTION__,
         (long long)aperture.aper_size,
         (long long)aperture.aper_available_size));

    kgem->aperture_total = aperture.aper_size;
    kgem->aperture_high = aperture.aper_size * 3/4;
    kgem->aperture_low = aperture.aper_size * 1/3;
    if (gen < 033) {
        /* Severe alignment penalties */
        kgem->aperture_high /= 2;
        kgem->aperture_low /= 2;
    }
    DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
         kgem->aperture_low, kgem->aperture_low / (1024*1024),
         kgem->aperture_high, kgem->aperture_high / (1024*1024)));

    kgem->aperture_mappable = agp_aperture_size(dev, gen);
    if (kgem->aperture_mappable == 0 ||
        kgem->aperture_mappable > aperture.aper_size)
        kgem->aperture_mappable = aperture.aper_size;
    DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
         kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));

    kgem->buffer_size = 64 * 1024;
    while (kgem->buffer_size < kgem->aperture_mappable >> 10)
        kgem->buffer_size *= 2;
    if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
        kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
    DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
         kgem->buffer_size, kgem->buffer_size / 1024));

    kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
    kgem->max_gpu_size = kgem->max_object_size;
    if (!kgem->has_llc)
        kgem->max_gpu_size = MAX_CACHE_SIZE;

    totalram = total_ram_size();
    if (totalram == 0) {
        DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
             __FUNCTION__));
        totalram = kgem->aperture_total;
    }
    DBG(("%s: total ram=%u\n", __FUNCTION__, totalram));
    if (kgem->max_object_size > totalram / 2)
        kgem->max_object_size = totalram / 2;
    if (kgem->max_gpu_size > totalram / 4)
        kgem->max_gpu_size = totalram / 4;

    kgem->max_cpu_size = kgem->max_object_size;

    half_gpu_max = kgem->max_gpu_size / 2;
    kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
    if (kgem->max_copy_tile_size > half_gpu_max)
        kgem->max_copy_tile_size = half_gpu_max;

    if (kgem->has_llc)
        kgem->max_upload_tile_size = kgem->max_copy_tile_size;
    else
        kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
    if (kgem->max_upload_tile_size > half_gpu_max)
        kgem->max_upload_tile_size = half_gpu_max;

    kgem->large_object_size = MAX_CACHE_SIZE;
    if (kgem->large_object_size > kgem->max_gpu_size)
        kgem->large_object_size = kgem->max_gpu_size;

    if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
        if (kgem->large_object_size > kgem->max_cpu_size)
            kgem->large_object_size = kgem->max_cpu_size;
    } else
        kgem->max_cpu_size = 0;
    if (DBG_NO_CPU)
        kgem->max_cpu_size = 0;

    DBG(("%s: maximum object size=%d\n",
         __FUNCTION__, kgem->max_object_size));
    DBG(("%s: large object threshold=%d\n",
         __FUNCTION__, kgem->large_object_size));
    DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
         __FUNCTION__,
         kgem->max_gpu_size, kgem->max_cpu_size,
         kgem->max_upload_tile_size, kgem->max_copy_tile_size));

    /* Convert the aperture thresholds to pages */
    kgem->aperture_low /= PAGE_SIZE;
    kgem->aperture_high /= PAGE_SIZE;

    kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
    if ((int)kgem->fence_max < 0)
        kgem->fence_max = 5; /* minimum safe value for all hw */
    DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));

    kgem->batch_flags_base = 0;
    if (kgem->has_no_reloc)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
    if (kgem->has_handle_lut)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
    if (kgem->has_pinned_batches)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;
}

inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
						struct kgem_bo *bo)
{
	DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));

	list_del(&bo->list);
	assert(bo->rq == NULL);
	assert(bo->exec == NULL);
	if (bo->map) {
		assert(!list_is_empty(&bo->vma));
		list_del(&bo->vma);
		kgem->vma[IS_CPU_MAP(bo->map)].count--;
	}
}

static struct kgem_bo *
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
{
	struct kgem_bo *bo, *first = NULL;
	bool use_active = (flags & CREATE_INACTIVE) == 0;
	struct list *cache;

	DBG(("%s: num_pages=%d, flags=%x, use_active? %d\n",
	     __FUNCTION__, num_pages, flags, use_active));

	if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
		return NULL;

	if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
		DBG(("%s: inactive and cache bucket empty\n",
		     __FUNCTION__));

		if (flags & CREATE_NO_RETIRE) {
			DBG(("%s: can not retire\n", __FUNCTION__));
			return NULL;
		}

		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) {
			DBG(("%s: active cache bucket empty\n", __FUNCTION__));
			return NULL;
		}

		if (!__kgem_throttle_retire(kgem, flags)) {
			DBG(("%s: nothing retired\n", __FUNCTION__));
			return NULL;
		}

		if (list_is_empty(inactive(kgem, num_pages))) {
			DBG(("%s: active cache bucket still empty after retire\n",
			     __FUNCTION__));
			return NULL;
		}
	}

	if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
		int for_cpu = !!(flags & CREATE_CPU_MAP);
		DBG(("%s: searching for inactive %s map\n",
		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
		list_for_each_entry(bo, cache, vma) {
			assert(IS_CPU_MAP(bo->map) == for_cpu);
			assert(bucket(bo) == cache_bucket(num_pages));
			assert(bo->proxy == NULL);
			assert(bo->rq == NULL);
			assert(bo->exec == NULL);
			assert(!bo->scanout);

			if (num_pages > num_pages(bo)) {
				DBG(("inactive too small: %d < %d\n",
				     num_pages(bo), num_pages));
				continue;
			}

			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
				kgem_bo_free(kgem, bo);
				break;
			}

			if (I915_TILING_NONE != bo->tiling &&
			    !gem_set_tiling(kgem->fd, bo->handle,
					    I915_TILING_NONE, 0))
				continue;

			kgem_bo_remove_from_inactive(kgem, bo);

			bo->tiling = I915_TILING_NONE;
			bo->pitch = 0;
			bo->delta = 0;
			DBG(("  %s: found handle=%d (num_pages=%d) in linear vma cache\n",
			     __FUNCTION__, bo->handle, num_pages(bo)));
			assert(use_active || bo->domain != DOMAIN_GPU);
			assert(!bo->needs_flush);
			ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
			return bo;
		}

		if (flags & CREATE_EXACT)
			return NULL;

		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
			return NULL;
	}

	cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages);
	list_for_each_entry(bo, cache, list) {
		assert(bo->refcnt == 0);
		assert(bo->reusable);
		assert(!!bo->rq == !!use_active);
		assert(bo->proxy == NULL);
		assert(!bo->scanout);

		if (num_pages > num_pages(bo))
			continue;

		if (use_active &&
		    kgem->gen <= 040 &&
		    bo->tiling != I915_TILING_NONE)
			continue;

		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
			kgem_bo_free(kgem, bo);
			break;
		}

		if (I915_TILING_NONE != bo->tiling) {
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP))
				continue;

			if (first)
				continue;

			if (!gem_set_tiling(kgem->fd, bo->handle,
					    I915_TILING_NONE, 0))
				continue;

			bo->tiling = I915_TILING_NONE;
			bo->pitch = 0;
		}

		if (bo->map) {
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
				int for_cpu = !!(flags & CREATE_CPU_MAP);
				if (IS_CPU_MAP(bo->map) != for_cpu) {
					if (first != NULL)
						break;

					first = bo;
					continue;
				}
			} else {
				if (first != NULL)
					break;

				first = bo;
				continue;
			}
		} else {
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
				if (first != NULL)
					break;

				first = bo;
				continue;
			}
		}

		if (use_active)
			kgem_bo_remove_from_active(kgem, bo);
		else
			kgem_bo_remove_from_inactive(kgem, bo);

		assert(bo->tiling == I915_TILING_NONE);
		bo->pitch = 0;
		bo->delta = 0;
		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
		     __FUNCTION__, bo->handle, num_pages(bo),
		     use_active ? "active" : "inactive"));
		assert(list_is_empty(&bo->list));
		assert(use_active || bo->domain != DOMAIN_GPU);
		assert(!bo->needs_flush || use_active);
		ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
		return bo;
	}

	if (first) {
		assert(first->tiling == I915_TILING_NONE);

		if (use_active)
			kgem_bo_remove_from_active(kgem, first);
		else
			kgem_bo_remove_from_inactive(kgem, first);

		first->pitch = 0;
		first->delta = 0;
		DBG(("  %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
		     __FUNCTION__, first->handle, num_pages(first),
		     use_active ? "active" : "inactive"));
		assert(list_is_empty(&first->list));
		assert(use_active || first->domain != DOMAIN_GPU);
		assert(!first->needs_flush || use_active);
		ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active);
		return first;
	}

	return NULL;
}
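
/*
 * search_linear_cache() summary: for inactive, mappable requests it
 * first walks the per-bucket VMA cache looking for a bo that already
 * has the right kind of map; otherwise it walks the active or inactive
 * bucket list, untiling candidates where permitted, and keeps the first
 * mismatched-map bo as a near-miss fallback ("first") to return if no
 * exact match is found.
 */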

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
{
	struct kgem_bo *bo;
	uint32_t handle;

	DBG(("%s(%d)\n", __FUNCTION__, size));

	if (flags & CREATE_GTT_MAP && kgem->has_llc) {
		flags &= ~CREATE_GTT_MAP;
		flags |= CREATE_CPU_MAP;
	}

	size = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags);
	if (bo) {
		assert(bo->domain != DOMAIN_GPU);
		ASSERT_IDLE(kgem, bo->handle);
		bo->refcnt = 1;
		return bo;
	}

	if (flags & CREATE_CACHED)
		return NULL;

	handle = gem_create(kgem->fd, size);
	if (handle == 0)
		return NULL;

	DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size));
	bo = __kgem_bo_alloc(handle, size);
	if (bo == NULL) {
		gem_close(kgem->fd, handle);
		return NULL;
	}

	debug_alloc__bo(kgem, bo);
	return bo;
}
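
/*
 * Typical use (a minimal sketch): sizes are given in bytes and rounded
 * up to whole pages, a CREATE_GTT_MAP request is downgraded to a CPU
 * map on LLC hardware, and CREATE_CACHED means "reuse only, never
 * allocate":
 *
 *     struct kgem_bo *bo = kgem_create_linear(kgem, 4096, CREATE_GTT_MAP);
 *     if (bo && kgem_bo_write(kgem, bo, data, 4096))
 *         ... use bo ...
 */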

void kgem_clear_dirty(struct kgem *kgem)
{
	struct list * const buffers = &kgem->next_request->buffers;
	struct kgem_bo *bo;

	list_for_each_entry(bo, buffers, request) {
		if (!bo->dirty)
			break;

		bo->dirty = false;
	}
}

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b && b->offset; b = b->next)
		if (format == b->format)
			return b->offset;

	return 0;
}

void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b; b = b->next) {
		if (b->offset)
			continue;

		b->offset = offset;
		b->format = format;

		if (b->next)
			b->next->offset = 0;

		return;
	}

	b = malloc(sizeof(*b));
	if (b) {
		b->next = bo->binding.next;
		b->format = format;
		b->offset = offset;
		bo->binding.next = b;
	}
}

uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domain,
			uint32_t delta)
{
	return 0;
}

void kgem_reset(struct kgem *kgem)
{
}

void _kgem_submit(struct kgem *kgem)
{
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
}