/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson 
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "sna.h"
#include "sna_reg.h"

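/* Note: user_free() appears to be a thin KolibriOS-specific helper.  It
 * issues the int $0x40 system call with eax=68, ebx=12 and the pointer in
 * ecx to hand a user-space memory block back to the kernel, and returns the
 * syscall status left in eax.  The exact subfunction semantics follow the
 * KolibriOS system-call documentation; this comment is descriptive only.
 */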
static inline
int user_free(void *mem)
{
    int  val;
    __asm__ __volatile__(
    "int $0x40"
    :"=a"(val)
    :"a"(68),"b"(12),"c"(mem));
    return val;
}


unsigned int cpu_cache_size();

static struct kgem_bo *
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

static struct kgem_bo *
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

#define DBG_NO_HW 0
#define DBG_NO_TILING 1
#define DBG_NO_CACHE 0
#define DBG_NO_CACHE_LEVEL 0
#define DBG_NO_CPU 0
#define DBG_NO_USERPTR 0
#define DBG_NO_LLC 0
#define DBG_NO_SEMAPHORES 0
#define DBG_NO_MADV 1
#define DBG_NO_UPLOAD_CACHE 0
#define DBG_NO_UPLOAD_ACTIVE 0
#define DBG_NO_MAP_UPLOAD 0
#define DBG_NO_RELAXED_FENCING 0
#define DBG_NO_SECURE_BATCHES 0
#define DBG_NO_PINNED_BATCHES 0
#define DBG_NO_FAST_RELOC 0
#define DBG_NO_HANDLE_LUT 0
#define DBG_DUMP 0

#ifndef DEBUG_SYNC
#define DEBUG_SYNC 0
#endif

#define SHOW_BATCH 0

#if 0
#define ASSERT_IDLE(kgem__, handle__) assert(!__kgem_busy(kgem__, handle__))
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__) assert(!(expect__) || !__kgem_busy(kgem__, handle__))
#else
#define ASSERT_IDLE(kgem__, handle__)
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__)
#endif

/* Worst case seems to be 965gm where we cannot write within a cacheline that
 * is simultaneously being read by the GPU, or within the sampler
 * prefetch. In general, the chipsets seem to have a requirement that sampler
 * offsets be aligned to a cacheline (64 bytes).
 */
#define UPLOAD_ALIGNMENT 128

#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)

#define MAX_GTT_VMA_CACHE 512
#define MAX_CPU_VMA_CACHE INT16_MAX
#define MAP_PRESERVE_TIME 10

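/* The bo->map pointer doubles as a small tagged union: mappings are at least
 * 4-byte aligned, so the two low bits encode how the pointer was obtained
 * (0 = GTT mapping, bit 0 = CPU mapping, bits 0|1 = userptr mapping) and
 * MAP() masks the tag off to recover the usable address.  This reading is
 * inferred from the macros below and from the IS_USER_MAP()/__MAP_TYPE()
 * call sites in this file.
 */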
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)

#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))

#define LOCAL_I915_PARAM_HAS_BLT		        11
#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING	12
#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA	    15
#define LOCAL_I915_PARAM_HAS_SEMAPHORES		    20
#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES	    23
#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES	    24
#define LOCAL_I915_PARAM_HAS_NO_RELOC		    25
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT		    26

#define LOCAL_I915_EXEC_IS_PINNED		(1<<10)
#define LOCAL_I915_EXEC_NO_RELOC		(1<<11)
#define LOCAL_I915_EXEC_HANDLE_LUT		(1<<12)
struct local_i915_gem_userptr {
	uint64_t user_ptr;
	uint32_t user_size;
	uint32_t flags;
#define I915_USERPTR_READ_ONLY (1<<0)
#define I915_USERPTR_UNSYNCHRONIZED (1<<31)
	uint32_t handle;
};

#define UNCACHED	0
#define SNOOPED		1

struct local_i915_gem_cacheing {
	uint32_t handle;
	uint32_t cacheing;
};

#define LOCAL_IOCTL_I915_GEM_SET_CACHEING SRV_I915_GEM_SET_CACHEING

struct local_fbinfo {
	int width;
	int height;
	int pitch;
	int tiling;
};

struct kgem_buffer {
	struct kgem_bo base;
	void *mem;
	uint32_t used;
	uint32_t need_io : 1;
	uint32_t write : 2;
	uint32_t mmapped : 1;
};

static struct kgem_bo *__kgem_freed_bo;
static struct kgem_request *__kgem_freed_request;
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;

static inline int bytes(struct kgem_bo *bo)
{
	return __kgem_bo_size(bo);
}

#define bucket(B) (B)->size.pages.bucket
#define num_pages(B) (B)->size.pages.count

#ifdef DEBUG_MEMORY
static void debug_alloc(struct kgem *kgem, size_t size)
{
	kgem->debug_memory.bo_allocs++;
	kgem->debug_memory.bo_bytes += size;
}
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
{
	debug_alloc(kgem, bytes(bo));
}
#else
#define debug_alloc(k, b)
#define debug_alloc__bo(k, b)
#endif

static void kgem_sna_reset(struct kgem *kgem)
{
	struct sna *sna = container_of(kgem, struct sna, kgem);

	sna->render.reset(sna);
	sna->blt_state.fill_bo = 0;
}

static void kgem_sna_flush(struct kgem *kgem)
{
	struct sna *sna = container_of(kgem, struct sna, kgem);

	sna->render.flush(sna);

//	if (sna->render.solid_cache.dirty)
//		sna_render_flush_solid(sna);
}

static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
{
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (DBG_NO_TILING)
		return false;
/*
	VG_CLEAR(set_tiling);
	do {
		set_tiling.handle = handle;
		set_tiling.tiling_mode = tiling;
		set_tiling.stride = stride;

		ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
*/
	return false;//ret == 0;
}

static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
{
	struct local_i915_gem_cacheing arg;

	VG_CLEAR(arg);
	arg.handle = handle;
	arg.cacheing = cacheing;
	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
}




 
static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
{
	if (flags & CREATE_NO_RETIRE) {
		DBG(("%s: not retiring per-request\n", __FUNCTION__));
		return false;
	}

	if (!kgem->need_retire) {
		DBG(("%s: nothing to retire\n", __FUNCTION__));
		return false;
	}

	if (kgem_retire(kgem))
		return true;

	if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
		DBG(("%s: not throttling\n", __FUNCTION__));
		return false;
	}

	kgem_throttle(kgem);
	return kgem_retire(kgem);
}

static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap_gtt mmap_arg;
	void *ptr;

	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
	     bo->handle, bytes(bo)));
	assert(bo->proxy == NULL);

retry_gtt:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
		printf("%s: failed to retrieve GTT offset for handle=%d: %d\n",
		       __FUNCTION__, bo->handle, 0);
		(void)__kgem_throttle_retire(kgem, 0);
		if (kgem_expire_cache(kgem))
			goto retry_gtt;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry_gtt;
		}

		return NULL;
	}

retry_mmap:
//	ptr = mmap(0, bytes(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
//		   kgem->fd, mmap_arg.offset);
//	if (ptr == 0) {
		printf("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
		       __FUNCTION__, bo->handle, bytes(bo), 0);
//		if (__kgem_throttle_retire(kgem, 0))
//			goto retry_mmap;

//		if (kgem->need_expire) {
//			kgem_cleanup_cache(kgem);
//			goto retry_mmap;
//		}

		ptr = NULL;
//	}

	return ptr;
}

static int __gem_write(int fd, uint32_t handle,
		       int offset, int length,
		       const void *src)
{
	struct drm_i915_gem_pwrite pwrite;

	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
	     handle, offset, length));

	VG_CLEAR(pwrite);
	pwrite.handle = handle;
	pwrite.offset = offset;
	pwrite.size = length;
	pwrite.data_ptr = (uintptr_t)src;
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}

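/* Unlike __gem_write() above, gem_write() widens any unaligned pwrite to
 * whole 64-byte cachelines before handing it to the kernel, which avoids the
 * partial-cacheline hazard described near UPLOAD_ALIGNMENT.  The resulting
 * over-read of the source buffer is assumed to be safe for the callers in
 * this file, as the in-line comment below notes.
 */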
static int gem_write(int fd, uint32_t handle,
		     int offset, int length,
		     const void *src)
{
	struct drm_i915_gem_pwrite pwrite;

	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
	     handle, offset, length));

	VG_CLEAR(pwrite);
	pwrite.handle = handle;
	/* align the transfer to cachelines; fortuitously this is safe! */
	if ((offset | length) & 63) {
		pwrite.offset = offset & ~63;
		pwrite.size = ALIGN(offset+length, 64) - pwrite.offset;
		pwrite.data_ptr = (uintptr_t)src + pwrite.offset - offset;
	} else {
		pwrite.offset = offset;
		pwrite.size = length;
		pwrite.data_ptr = (uintptr_t)src;
	}
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}


bool __kgem_busy(struct kgem *kgem, int handle)
{
	struct drm_i915_gem_busy busy;

	VG_CLEAR(busy);
	busy.handle = handle;
	busy.busy = !kgem->wedged;
	(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	DBG(("%s: handle=%d, busy=%d, wedged=%d\n",
	     __FUNCTION__, handle, busy.busy, kgem->wedged));

	return busy.busy;
}

static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
	     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
	     __kgem_busy(kgem, bo->handle)));
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->vma));

	if (bo->rq) {
		if (!__kgem_busy(kgem, bo->handle)) {
			__kgem_bo_clear_busy(bo);
			kgem_retire(kgem);
		}
	} else {
		assert(!bo->needs_flush);
		ASSERT_IDLE(kgem, bo->handle);
	}
}

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length)
{
	assert(bo->refcnt);
	assert(!bo->purged);
	assert(bo->proxy == NULL);
	ASSERT_IDLE(kgem, bo->handle);

	assert(length <= bytes(bo));
	if (gem_write(kgem->fd, bo->handle, 0, length, data))
		return false;

	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
	if (bo->exec == NULL) {
		kgem_bo_retire(kgem, bo);
		bo->domain = DOMAIN_NONE;
	}
	return true;
}

static uint32_t gem_create(int fd, int num_pages)
{
	struct drm_i915_gem_create create;

	VG_CLEAR(create);
	create.handle = 0;
	create.size = PAGE_SIZE * num_pages;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

	return create.handle;
}

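/* Purgeable tracking: the following helpers wrap DRM_IOCTL_I915_GEM_MADVISE
 * to mark cached, idle buffers as I915_MADV_DONTNEED (so the kernel may
 * reclaim their pages under pressure) and to reclaim them again with
 * I915_MADV_WILLNEED before reuse.  With DBG_NO_MADV set to 1 above, this
 * port compiles the ioctl paths out and simply reports every buffer as
 * retained.
 */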
static bool
kgem_bo_set_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	assert(bo->exec == NULL);
	assert(!bo->purged);

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_DONTNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
		bo->purged = 1;
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
		return madv.retained;
	}

	return true;
#endif
}

static bool
kgem_bo_is_retained(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	if (!bo->purged)
		return true;

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_DONTNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0)
		return madv.retained;

	return false;
#endif
}

static bool
kgem_bo_clear_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	assert(bo->purged);

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_WILLNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
		bo->purged = !madv.retained;
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
		return madv.retained;
	}

	return false;
#endif
}

static void gem_close(int fd, uint32_t handle)
{
	struct drm_gem_close close;

	VG_CLEAR(close);
	close.handle = handle;
	(void)drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
}

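/* __fls() returns the index of the most significant set bit (e.g. values
 * 4..7 map to 2), using bsr on x86 and a portable shift loop otherwise.
 * cache_bucket() below uses it to group buffer sizes into power-of-two page
 * buckets for the active/inactive cache lists.
 */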
constant inline static unsigned long __fls(unsigned long word)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
#else
	unsigned int v = 0;

	while (word >>= 1)
		v++;

	return v;
#endif
}

constant inline static int cache_bucket(int num_pages)
{
	return __fls(num_pages);
}

static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
				      int handle, int num_pages)
{
	assert(num_pages);
	memset(bo, 0, sizeof(*bo));

	bo->refcnt = 1;
	bo->handle = handle;
	bo->target_handle = -1;
	num_pages(bo) = num_pages;
	bucket(bo) = cache_bucket(num_pages);
	bo->reusable = true;
	bo->domain = DOMAIN_CPU;
	list_init(&bo->request);
	list_init(&bo->list);
	list_init(&bo->vma);

	return bo;
}

static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
{
	struct kgem_bo *bo;

	if (__kgem_freed_bo) {
		bo = __kgem_freed_bo;
		__kgem_freed_bo = *(struct kgem_bo **)bo;
	} else {
		bo = malloc(sizeof(*bo));
		if (bo == NULL)
			return NULL;
	}

	return __kgem_bo_init(bo, handle, num_pages);
}

static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
{
	struct kgem_request *rq;

	rq = __kgem_freed_request;
	if (rq) {
		__kgem_freed_request = *(struct kgem_request **)rq;
	} else {
		rq = malloc(sizeof(*rq));
		if (rq == NULL)
			rq = &kgem->static_request;
	}

	list_init(&rq->buffers);
	rq->bo = NULL;
	rq->ring = 0;

	return rq;
}

static void __kgem_request_free(struct kgem_request *rq)
{
	_list_del(&rq->list);
	*(struct kgem_request **)rq = __kgem_freed_request;
	__kgem_freed_request = rq;
}

static struct list *inactive(struct kgem *kgem, int num_pages)
{
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
	return &kgem->inactive[cache_bucket(num_pages)];
}

static struct list *active(struct kgem *kgem, int num_pages, int tiling)
{
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
	return &kgem->active[cache_bucket(num_pages)][tiling];
}

static size_t
agp_aperture_size(struct pci_device *dev, unsigned gen)
{
	/* XXX assume that only future chipsets are unknown and follow
	 * the post gen2 PCI layout.
	 */
//	return dev->regions[gen < 030 ? 0 : 2].size;

    return 0;
}

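/* total_ram_size() asks KolibriOS for memory information via int $0x40 with
 * eax=18, ebx=20, passing a small scratch buffer in ecx; the value returned
 * in eax is treated as the usable RAM size, with -1 meaning the query
 * failed.  This description is inferred from the call site; consult the
 * KolibriOS system-call documentation for the authoritative semantics.
 */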
static size_t
total_ram_size(void)
{
    uint32_t  data[9];
    size_t    size = 0;

    asm volatile("int $0x40"
        : "=a" (size)
        : "a" (18),"b"(20), "c" (data)
        : "memory");

    return size != -1 ? size : 0;
}

static int gem_param(struct kgem *kgem, int name)
{
    drm_i915_getparam_t gp;
    int v = -1; /* No param uses the sign bit, reserve it for errors */

    VG_CLEAR(gp);
    gp.param = name;
    gp.value = &v;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp))
        return -1;

    VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
    return v;
}

static bool test_has_execbuffer2(struct kgem *kgem)
{
	return 1;
}

static bool test_has_no_reloc(struct kgem *kgem)
{
	if (DBG_NO_FAST_RELOC)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
}

static bool test_has_handle_lut(struct kgem *kgem)
{
	if (DBG_NO_HANDLE_LUT)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
}

static bool test_has_semaphores_enabled(struct kgem *kgem)
{
	FILE *file;
	bool detected = false;
	int ret;

	if (DBG_NO_SEMAPHORES)
		return false;

	ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
	if (ret != -1)
		return ret > 0;

	return detected;
}

static bool __kgem_throttle(struct kgem *kgem)
{
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
		return false;

	return errno == EIO;
}

static bool is_hw_supported(struct kgem *kgem,
			    struct pci_device *dev)
{
	if (DBG_NO_HW)
		return false;

	if (!test_has_execbuffer2(kgem))
		return false;

	if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
		return kgem->has_blt;

	/* Although pre-855gm the GMCH is fubar, it works mostly. So
	 * let the user decide through "NoAccel" whether or not to risk
	 * hw acceleration.
	 */

	if (kgem->gen == 060 && dev->revision < 8) {
		/* pre-production SNB with dysfunctional BLT */
		return false;
	}

	if (kgem->gen >= 060) /* Only if the kernel supports the BLT ring */
		return kgem->has_blt;

	return true;
}

static bool test_has_relaxed_fencing(struct kgem *kgem)
{
	if (kgem->gen < 040) {
		if (DBG_NO_RELAXED_FENCING)
			return false;

		return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0;
	} else
		return true;
}

static bool test_has_llc(struct kgem *kgem)
{
	int has_llc = -1;

	if (DBG_NO_LLC)
		return false;

#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
	has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
#endif
	if (has_llc == -1) {
		DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
		has_llc = kgem->gen >= 060;
	}

	return has_llc;
}

static bool test_has_cacheing(struct kgem *kgem)
{
	uint32_t handle;
	bool ret;

	if (DBG_NO_CACHE_LEVEL)
		return false;

	/* Incoherent blt and sampler hangs the GPU */
	if (kgem->gen == 040)
		return false;

	handle = gem_create(kgem->fd, 1);
	if (handle == 0)
		return false;

	ret = gem_set_cacheing(kgem->fd, handle, UNCACHED);
	gem_close(kgem->fd, handle);
	return ret;
}

static bool test_has_userptr(struct kgem *kgem)
{
#if defined(USE_USERPTR)
	uint32_t handle;
	void *ptr;

	if (DBG_NO_USERPTR)
		return false;

	/* Incoherent blt and sampler hangs the GPU */
	if (kgem->gen == 040)
		return false;

	ptr = malloc(PAGE_SIZE);
	handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
	gem_close(kgem->fd, handle);
	free(ptr);

	return handle != 0;
#else
	return false;
#endif
}

static bool test_has_secure_batches(struct kgem *kgem)
{
	if (DBG_NO_SECURE_BATCHES)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
}

static bool test_has_pinned_batches(struct kgem *kgem)
{
	if (DBG_NO_PINNED_BATCHES)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
}


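/* Pre-allocate the pools of pinned batch buffers used when the kernel
 * supports DRM_IOCTL_I915_GEM_PIN: from the count[]/size[] tables below,
 * two 1-page batches in the first pool and one 2-page batch in the second.
 * If pinning fails, the same lists are repopulated with ordinary unpinned
 * bos as a fallback.
 */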
static bool kgem_init_pinned_batches(struct kgem *kgem)
{
	int count[2] = { 2, 1 };
	int size[2] = { 1, 2 };
	int n, i;

	if (kgem->wedged)
		return true;

	for (n = 0; n < ARRAY_SIZE(count); n++) {
		for (i = 0; i < count[n]; i++) {
			struct drm_i915_gem_pin pin;
			struct kgem_bo *bo;

			VG_CLEAR(pin);

			pin.handle = gem_create(kgem->fd, size[n]);
			if (pin.handle == 0)
				goto err;

			DBG(("%s: new handle=%d, num_pages=%d\n",
			     __FUNCTION__, pin.handle, size[n]));

			bo = __kgem_bo_alloc(pin.handle, size[n]);
			if (bo == NULL) {
				gem_close(kgem->fd, pin.handle);
				goto err;
			}

			pin.alignment = 0;
			if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
				gem_close(kgem->fd, pin.handle);
				goto err;
			}
			bo->presumed_offset = pin.offset;
			debug_alloc__bo(kgem, bo);
			list_add(&bo->list, &kgem->pinned_batches[n]);
		}
	}

	return true;

err:
	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
		while (!list_is_empty(&kgem->pinned_batches[n])) {
			kgem_bo_destroy(kgem,
					list_first_entry(&kgem->pinned_batches[n],
							 struct kgem_bo, list));
		}
	}

	/* For simplicity populate the lists with a single unpinned bo */
	for (n = 0; n < ARRAY_SIZE(count); n++) {
		struct kgem_bo *bo;
		uint32_t handle;

		handle = gem_create(kgem->fd, size[n]);
		if (handle == 0)
			break;

		bo = __kgem_bo_alloc(handle, size[n]);
		if (bo == NULL) {
			gem_close(kgem->fd, handle);
			break;
		}

		debug_alloc__bo(kgem, bo);
		list_add(&bo->list, &kgem->pinned_batches[n]);
	}
	return false;
}

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{
    struct drm_i915_gem_get_aperture aperture;
    size_t totalram;
    unsigned half_gpu_max;
    unsigned int i, j;

    DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));

    memset(kgem, 0, sizeof(*kgem));

    kgem->fd = fd;
    kgem->gen = gen;

    list_init(&kgem->requests[0]);
    list_init(&kgem->requests[1]);
    list_init(&kgem->batch_buffers);
    list_init(&kgem->active_buffers);
    list_init(&kgem->flushing);
    list_init(&kgem->large);
    list_init(&kgem->large_inactive);
    list_init(&kgem->snoop);
    list_init(&kgem->scanout);
    for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++)
        list_init(&kgem->pinned_batches[i]);
    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
        list_init(&kgem->inactive[i]);
    for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
        for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++)
            list_init(&kgem->active[i][j]);
    }
    for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) {
        for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
            list_init(&kgem->vma[i].inactive[j]);
    }
    kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
    kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;

    kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
    DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
         kgem->has_blt));

    kgem->has_relaxed_delta =
        gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
    DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
         kgem->has_relaxed_delta));

    kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
    DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
         kgem->has_relaxed_fencing));

    kgem->has_llc = test_has_llc(kgem);
    DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
         kgem->has_llc));

    kgem->has_cacheing = test_has_cacheing(kgem);
    DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
         kgem->has_cacheing));

    kgem->has_userptr = test_has_userptr(kgem);
    DBG(("%s: has userptr? %d\n", __FUNCTION__,
         kgem->has_userptr));

    kgem->has_no_reloc = test_has_no_reloc(kgem);
    DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
         kgem->has_no_reloc));

    kgem->has_handle_lut = test_has_handle_lut(kgem);
    DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
         kgem->has_handle_lut));

    kgem->has_semaphores = false;
    if (kgem->has_blt && test_has_semaphores_enabled(kgem))
        kgem->has_semaphores = true;
    DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
         kgem->has_semaphores));

    kgem->can_blt_cpu = gen >= 030;
    DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
         kgem->can_blt_cpu));

    kgem->has_secure_batches = test_has_secure_batches(kgem);
    DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
         kgem->has_secure_batches));

    kgem->has_pinned_batches = test_has_pinned_batches(kgem);
    DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
         kgem->has_pinned_batches));

    if (!is_hw_supported(kgem, dev)) {
        printf("Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
        kgem->wedged = 1;
    } else if (__kgem_throttle(kgem)) {
        printf("Detected a hung GPU, disabling acceleration.\n");
        kgem->wedged = 1;
    }

    kgem->batch_size = ARRAY_SIZE(kgem->batch);
    if (gen == 020 && !kgem->has_pinned_batches)
        /* Limited to what we can pin */
        kgem->batch_size = 4*1024;
    if (gen == 022)
        /* 865g cannot handle a batch spanning multiple pages */
        kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
    if ((gen >> 3) == 7)
        kgem->batch_size = 16*1024;
    if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
        kgem->batch_size = 4*1024;

    if (!kgem_init_pinned_batches(kgem) && gen == 020) {
        printf("Unable to reserve memory for GPU, disabling acceleration.\n");
        kgem->wedged = 1;
    }

    DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
         kgem->batch_size));

    kgem->min_alignment = 16;
    if (gen < 040)
        kgem->min_alignment = 64;

    kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
    DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
         kgem->half_cpu_cache_pages));

    kgem->next_request = __kgem_request_alloc(kgem);

    DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
         !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
         kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));

    VG_CLEAR(aperture);
    aperture.aper_size = 0;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
    if (aperture.aper_size == 0)
        aperture.aper_size = 64*1024*1024;

    DBG(("%s: aperture size %lld, available now %lld\n",
         __FUNCTION__,
         (long long)aperture.aper_size,
         (long long)aperture.aper_available_size));

    kgem->aperture_total = aperture.aper_size;
    kgem->aperture_high = aperture.aper_size * 3/4;
    kgem->aperture_low = aperture.aper_size * 1/3;
    if (gen < 033) {
        /* Severe alignment penalties */
        kgem->aperture_high /= 2;
        kgem->aperture_low /= 2;
    }
    DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
         kgem->aperture_low, kgem->aperture_low / (1024*1024),
         kgem->aperture_high, kgem->aperture_high / (1024*1024)));

    kgem->aperture_mappable = agp_aperture_size(dev, gen);
    if (kgem->aperture_mappable == 0 ||
        kgem->aperture_mappable > aperture.aper_size)
        kgem->aperture_mappable = aperture.aper_size;
    DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
         kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));

    kgem->buffer_size = 64 * 1024;
    while (kgem->buffer_size < kgem->aperture_mappable >> 10)
        kgem->buffer_size *= 2;
    if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
        kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
    DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
         kgem->buffer_size, kgem->buffer_size / 1024));

    kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
    kgem->max_gpu_size = kgem->max_object_size;
    if (!kgem->has_llc)
        kgem->max_gpu_size = MAX_CACHE_SIZE;

    totalram = total_ram_size();
    if (totalram == 0) {
        DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
             __FUNCTION__));
        totalram = kgem->aperture_total;
    }
    DBG(("%s: total ram=%u\n", __FUNCTION__, totalram));
    if (kgem->max_object_size > totalram / 2)
        kgem->max_object_size = totalram / 2;
    if (kgem->max_gpu_size > totalram / 4)
        kgem->max_gpu_size = totalram / 4;

    kgem->max_cpu_size = kgem->max_object_size;

    half_gpu_max = kgem->max_gpu_size / 2;
    kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
    if (kgem->max_copy_tile_size > half_gpu_max)
        kgem->max_copy_tile_size = half_gpu_max;

    if (kgem->has_llc)
        kgem->max_upload_tile_size = kgem->max_copy_tile_size;
    else
        kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
    if (kgem->max_upload_tile_size > half_gpu_max)
        kgem->max_upload_tile_size = half_gpu_max;

    kgem->large_object_size = MAX_CACHE_SIZE;
    if (kgem->large_object_size > kgem->max_gpu_size)
        kgem->large_object_size = kgem->max_gpu_size;

    if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
        if (kgem->large_object_size > kgem->max_cpu_size)
            kgem->large_object_size = kgem->max_cpu_size;
    } else
        kgem->max_cpu_size = 0;
    if (DBG_NO_CPU)
        kgem->max_cpu_size = 0;

    DBG(("%s: maximum object size=%d\n",
         __FUNCTION__, kgem->max_object_size));
    DBG(("%s: large object threshold=%d\n",
         __FUNCTION__, kgem->large_object_size));
    DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
         __FUNCTION__,
         kgem->max_gpu_size, kgem->max_cpu_size,
         kgem->max_upload_tile_size, kgem->max_copy_tile_size));

    /* Convert the aperture thresholds to pages */
    kgem->aperture_low /= PAGE_SIZE;
    kgem->aperture_high /= PAGE_SIZE;

    kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
    if ((int)kgem->fence_max < 0)
        kgem->fence_max = 5; /* minimum safe value for all hw */
    DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));

    kgem->batch_flags_base = 0;
    if (kgem->has_no_reloc)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
    if (kgem->has_handle_lut)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
    if (kgem->has_pinned_batches)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;
}

/* XXX hopefully a good approximation */
static uint32_t kgem_get_unique_id(struct kgem *kgem)
{
	uint32_t id;
	id = ++kgem->unique_id;
	if (id == 0)
		id = ++kgem->unique_id;
	return id;
}

inline static uint32_t kgem_pitch_alignment(struct kgem *kgem, unsigned flags)
{
	if (flags & CREATE_PRIME)
		return 256;
	if (flags & CREATE_SCANOUT)
		return 64;
	return kgem->min_alignment;
}

static uint32_t kgem_untiled_pitch(struct kgem *kgem,
				   uint32_t width, uint32_t bpp,
				   unsigned flags)
{
	width = ALIGN(width, 2) * bpp >> 3;
	return ALIGN(width, kgem_pitch_alignment(kgem, flags));
}
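
/* kgem_surface_size() picks the pitch and allocation size for a 2D surface:
 * the pitch is rounded up to the tile width (or to kgem_pitch_alignment()
 * for linear surfaces), the height to the tile height, and on gen4+ the
 * result is simply page aligned.  Older generations additionally force
 * power-of-two pitches for tiled buffers and, without relaxed fencing,
 * round the whole allocation up to a power-of-two fence region (with a
 * 512 KiB or 1 MiB minimum, depending on generation).
 */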
static uint32_t kgem_surface_size(struct kgem *kgem,
				  bool relaxed_fencing,
				  unsigned flags,
				  uint32_t width,
				  uint32_t height,
				  uint32_t bpp,
				  uint32_t tiling,
				  uint32_t *pitch)
{
	uint32_t tile_width, tile_height;
	uint32_t size;

	assert(width <= MAXSHORT);
	assert(height <= MAXSHORT);

	if (kgem->gen <= 030) {
		if (tiling) {
			if (kgem->gen < 030) {
				tile_width = 128;
				tile_height = 32;
			} else {
				tile_width = 512;
				tile_height = 16;
			}
		} else {
			tile_width = 2 * bpp >> 3;
			tile_width = ALIGN(tile_width,
					   kgem_pitch_alignment(kgem, flags));
			tile_height = 2;
		}
	} else switch (tiling) {
	default:
	case I915_TILING_NONE:
		tile_width = 2 * bpp >> 3;
		tile_width = ALIGN(tile_width,
				   kgem_pitch_alignment(kgem, flags));
		tile_height = 2;
		break;

		/* XXX align to an even tile row */
	case I915_TILING_X:
		tile_width = 512;
		tile_height = 16;
		break;
	case I915_TILING_Y:
		tile_width = 128;
		tile_height = 64;
		break;
	}

	*pitch = ALIGN(width * bpp / 8, tile_width);
	height = ALIGN(height, tile_height);
	if (kgem->gen >= 040)
		return PAGE_ALIGN(*pitch * height);

	/* If it is too wide for the blitter, don't even bother.  */
	if (tiling != I915_TILING_NONE) {
		if (*pitch > 8192)
			return 0;

		for (size = tile_width; size < *pitch; size <<= 1)
			;
		*pitch = size;
	} else {
		if (*pitch >= 32768)
			return 0;
	}

	size = *pitch * height;
	if (relaxed_fencing || tiling == I915_TILING_NONE)
		return PAGE_ALIGN(size);

	/*  We need to allocate a pot fence region for a tiled buffer. */
	if (kgem->gen < 030)
		tile_width = 512 * 1024;
	else
		tile_width = 1024 * 1024;
	while (tile_width < size)
		tile_width *= 2;
	return tile_width;
}

static uint32_t kgem_aligned_height(struct kgem *kgem,
				    uint32_t height, uint32_t tiling)
{
	uint32_t tile_height;

	if (kgem->gen <= 030) {
		tile_height = tiling ? kgem->gen < 030 ? 32 : 16 : 1;
	} else switch (tiling) {
		/* XXX align to an even tile row */
	default:
	case I915_TILING_NONE:
		tile_height = 1;
		break;
	case I915_TILING_X:
		tile_height = 16;
		break;
	case I915_TILING_Y:
		tile_height = 64;
		break;
	}

	return ALIGN(height, tile_height);
}

static struct drm_i915_gem_exec_object2 *
kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_exec_object2 *exec;

	DBG(("%s: handle=%d, index=%d\n",
	     __FUNCTION__, bo->handle, kgem->nexec));

	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
	bo->target_handle = kgem->has_handle_lut ? kgem->nexec : bo->handle;
	exec = memset(&kgem->exec[kgem->nexec++], 0, sizeof(*exec));
	exec->handle = bo->handle;
	exec->offset = bo->presumed_offset;

	kgem->aperture += num_pages(bo);

	return exec;
}

static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
	bo->exec = kgem_add_handle(kgem, bo);
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);

	list_move_tail(&bo->request, &kgem->next_request->buffers);

	/* XXX is it worth working around gcc here? */
	kgem->flush |= bo->flush;
}

static uint32_t kgem_end_batch(struct kgem *kgem)
{
	kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
	if (kgem->nbatch & 1)
		kgem->batch[kgem->nbatch++] = MI_NOOP;

	return kgem->nbatch;
}

static void kgem_fixup_self_relocs(struct kgem *kgem, struct kgem_bo *bo)
{
	int n;

	if (kgem->nreloc__self == 0)
		return;

	for (n = 0; n < kgem->nreloc__self; n++) {
		int i = kgem->reloc__self[n];
		assert(kgem->reloc[i].target_handle == ~0U);
		kgem->reloc[i].target_handle = bo->target_handle;
		kgem->reloc[i].presumed_offset = bo->presumed_offset;
		kgem->batch[kgem->reloc[i].offset/sizeof(kgem->batch[0])] =
			kgem->reloc[i].delta + bo->presumed_offset;
	}

	if (n == 256) {
		for (n = kgem->reloc__self[255]; n < kgem->nreloc; n++) {
			if (kgem->reloc[n].target_handle == ~0U) {
				kgem->reloc[n].target_handle = bo->target_handle;
				kgem->reloc[n].presumed_offset = bo->presumed_offset;
				kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
					kgem->reloc[n].delta + bo->presumed_offset;
			}
		}

	}

}

static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo)
{
	struct kgem_bo_binding *b;

	b = bo->binding.next;
	while (b) {
		struct kgem_bo_binding *next = b->next;
		free (b);
		b = next;
	}
}

static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
{
	int type = IS_CPU_MAP(bo->map);

	assert(!IS_USER_MAP(bo->map));

	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
	     __FUNCTION__, type ? "CPU" : "GTT",
	     bo->handle, kgem->vma[type].count));

	VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
	user_free(MAP(bo->map));
	bo->map = NULL;

	if (!list_is_empty(&bo->vma)) {
		list_del(&bo->vma);
		kgem->vma[type].count--;
	}
}

static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));

	assert(bo->refcnt == 0);
	assert(bo->exec == NULL);
	assert(!bo->snoop || bo->rq == NULL);

#ifdef DEBUG_MEMORY
	kgem->debug_memory.bo_allocs--;
	kgem->debug_memory.bo_bytes -= bytes(bo);
#endif

	kgem_bo_binding_free(kgem, bo);

	if (IS_USER_MAP(bo->map)) {
		assert(bo->rq == NULL);
		assert(MAP(bo->map) != bo || bo->io);
		if (bo != MAP(bo->map)) {
			DBG(("%s: freeing snooped base\n", __FUNCTION__));
			free(MAP(bo->map));
		}
		bo->map = NULL;
	}
	if (bo->map)
		kgem_bo_release_map(kgem, bo);
	assert(list_is_empty(&bo->vma));

	_list_del(&bo->list);
	_list_del(&bo->request);
	gem_close(kgem->fd, bo->handle);

	if (!bo->io) {
		*(struct kgem_bo **)bo = __kgem_freed_bo;
		__kgem_freed_bo = bo;
	} else
		free(bo);
}

inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
					    struct kgem_bo *bo)
{
	DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle));

	assert(bo->refcnt == 0);
	assert(bo->reusable);
	assert(bo->rq == NULL);
	assert(bo->exec == NULL);
	assert(bo->domain != DOMAIN_GPU);
	assert(!bo->proxy);
	assert(!bo->io);
	assert(!bo->scanout);
	assert(!bo->needs_flush);
	assert(list_is_empty(&bo->vma));
	ASSERT_IDLE(kgem, bo->handle);

	kgem->need_expire = true;

	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
		list_move(&bo->list, &kgem->large_inactive);
		return;
	}

	assert(bo->flush == false);
	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
	if (bo->map) {
		int type = IS_CPU_MAP(bo->map);
		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
		    (!type && !__kgem_bo_is_mappable(kgem, bo))) {
//			munmap(MAP(bo->map), bytes(bo));
			bo->map = NULL;
		}
		if (bo->map) {
			list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
			kgem->vma[type].count++;
		}
	}
}

static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
{
	struct kgem_bo *base;

	if (!bo->io)
		return bo;

	assert(!bo->snoop);
	base = malloc(sizeof(*base));
	if (base) {
		DBG(("%s: transferring io handle=%d to bo\n",
		     __FUNCTION__, bo->handle));
		/* transfer the handle to a minimum bo */
		memcpy(base, bo, sizeof(*base));
		base->io = false;
		list_init(&base->list);
		list_replace(&bo->request, &base->request);
		list_replace(&bo->vma, &base->vma);
		free(bo);
		bo = base;
	} else
		bo->reusable = false;

	return bo;
}

inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
						struct kgem_bo *bo)
{
	DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));

	list_del(&bo->list);
	assert(bo->rq == NULL);
	assert(bo->exec == NULL);
	if (bo->map) {
		assert(!list_is_empty(&bo->vma));
		list_del(&bo->vma);
		kgem->vma[IS_CPU_MAP(bo->map)].count--;
	}
}

inline static void kgem_bo_remove_from_active(struct kgem *kgem,
					      struct kgem_bo *bo)
{
	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));

	list_del(&bo->list);
	assert(bo->rq != NULL);
	if (bo->rq == (void *)kgem)
		list_del(&bo->request);
	assert(list_is_empty(&bo->vma));
}

static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->scanout);
	assert(!bo->refcnt);
	assert(bo->exec == NULL);
	assert(bo->proxy == NULL);

	DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
	     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
	if (bo->delta) {
		/* XXX will leak if we are not DRM_MASTER. *shrug* */
//		drmModeRmFB(kgem->fd, bo->delta);
		bo->delta = 0;
	}

	bo->scanout = false;
	bo->flush = false;
	bo->reusable = true;

	if (kgem->has_llc &&
	    !gem_set_cacheing(kgem->fd, bo->handle, SNOOPED))
		bo->reusable = false;
}

static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
{
	struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;

	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));

	if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used)
		io->used = bo->delta;
}

static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt == 0);
	assert(bo->scanout);
	assert(bo->delta);
	assert(!bo->snoop);
	assert(!bo->io);

	DBG(("%s: moving %d [fb %d] to scanout cache, active? %d\n",
	     __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL));
	if (bo->rq)
		list_move_tail(&bo->list, &kgem->scanout);
	else
		list_move(&bo->list, &kgem->scanout);
}

static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt == 0);
	assert(bo->exec == NULL);

	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
		     __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13));
		kgem_bo_free(kgem, bo);
		return;
	}

	assert(bo->tiling == I915_TILING_NONE);
	assert(bo->rq == NULL);

	DBG(("%s: moving %d to snoop cache\n", __FUNCTION__, bo->handle));
	list_add(&bo->list, &kgem->snoop);
}

static struct kgem_bo *
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
{
	struct kgem_bo *bo, *first = NULL;

	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));

	if ((kgem->has_cacheing | kgem->has_userptr) == 0)
		return NULL;

	if (list_is_empty(&kgem->snoop)) {
		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
		if (!__kgem_throttle_retire(kgem, flags)) {
			DBG(("%s: nothing retired\n", __FUNCTION__));
			return NULL;
		}
	}

	list_for_each_entry(bo, &kgem->snoop, list) {
		assert(bo->refcnt == 0);
		assert(bo->snoop);
		assert(!bo->scanout);
		assert(bo->proxy == NULL);
		assert(bo->tiling == I915_TILING_NONE);
		assert(bo->rq == NULL);
		assert(bo->exec == NULL);

		if (num_pages > num_pages(bo))
			continue;

		if (num_pages(bo) > 2*num_pages) {
			if (first == NULL)
				first = bo;
			continue;
		}

		list_del(&bo->list);
		bo->pitch = 0;
		bo->delta = 0;

		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
		     __FUNCTION__, bo->handle, num_pages(bo)));
		return bo;
	}

	if (first) {
		list_del(&first->list);
		first->pitch = 0;
		first->delta = 0;

		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
		     __FUNCTION__, first->handle, num_pages(first)));
		return first;
	}

	return NULL;
}

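/* __kgem_bo_destroy() is the slow path taken once the last reference to a bo
 * is dropped: the bo is routed to the snoop, scanout, active or inactive
 * cache (in that order of preference) so it can be reused later, and is only
 * freed outright when it is not reusable or caching is disabled.
 */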
static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));

	assert(list_is_empty(&bo->list));
	assert(bo->refcnt == 0);
	assert(!bo->purged);
	assert(bo->proxy == NULL);

	bo->binding.offset = 0;

	if (DBG_NO_CACHE)
		goto destroy;

	if (bo->snoop && !bo->flush) {
		DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
		assert(!bo->flush);
		assert(list_is_empty(&bo->list));
		if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle))
			__kgem_bo_clear_busy(bo);
		if (bo->rq == NULL) {
			assert(!bo->needs_flush);
			kgem_bo_move_to_snoop(kgem, bo);
		}
		return;
	}

	if (bo->scanout) {
		kgem_bo_move_to_scanout(kgem, bo);
		return;
	}

	if (bo->io)
		bo = kgem_bo_replace_io(bo);
	if (!bo->reusable) {
		DBG(("%s: handle=%d, not reusable\n",
		     __FUNCTION__, bo->handle));
		goto destroy;
	}

	if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU)
		kgem_bo_release_map(kgem, bo);

	assert(list_is_empty(&bo->vma));
	assert(list_is_empty(&bo->list));
	assert(bo->snoop == false);
	assert(bo->io == false);
	assert(bo->scanout == false);

	if (bo->exec && kgem->nexec == 1) {
		DBG(("%s: only handle in batch, discarding last operations\n",
		     __FUNCTION__));
		assert(bo->exec == &kgem->exec[0]);
		assert(kgem->exec[0].handle == bo->handle);
		assert(RQ(bo->rq) == kgem->next_request);
		bo->refcnt = 1;
		kgem_reset(kgem);
		bo->refcnt = 0;
	}

	if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	if (bo->rq) {
		struct list *cache;

		DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
		if (bucket(bo) < NUM_CACHE_BUCKETS)
			cache = &kgem->active[bucket(bo)][bo->tiling];
		else
			cache = &kgem->large;
		list_add(&bo->list, cache);
		return;
	}

	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->request));

	if (!IS_CPU_MAP(bo->map)) {
		if (!kgem_bo_set_purgeable(kgem, bo))
			goto destroy;

		if (!kgem->has_llc && bo->domain == DOMAIN_CPU)
			goto destroy;

		DBG(("%s: handle=%d, purged\n",
		     __FUNCTION__, bo->handle));
	}

	kgem_bo_move_to_inactive(kgem, bo);
	return;

destroy:
	if (!bo->exec)
		kgem_bo_free(kgem, bo);
}

static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		__kgem_bo_destroy(kgem, bo);
}

static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
{
	while (!list_is_empty(&bo->base.vma)) {
		struct kgem_bo *cached;

		cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
		assert(cached->proxy == &bo->base);
		list_del(&cached->vma);

		assert(*(struct kgem_bo **)cached->map == cached);
		*(struct kgem_bo **)cached->map = NULL;
		cached->map = NULL;

		kgem_bo_destroy(kgem, cached);
	}
}

static bool kgem_retire__buffers(struct kgem *kgem)
{
	bool retired = false;

	while (!list_is_empty(&kgem->active_buffers)) {
		struct kgem_buffer *bo =
			list_last_entry(&kgem->active_buffers,
					struct kgem_buffer,
					base.list);

		if (bo->base.rq)
			break;

		DBG(("%s: releasing upload cache for handle=%d? %d\n",
		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
		list_del(&bo->base.list);
		kgem_buffer_release(kgem, bo);
		kgem_bo_unref(kgem, &bo->base);
		retired = true;
	}

	return retired;
}

static bool kgem_retire__flushing(struct kgem *kgem)
1744
{
1745
	struct kgem_bo *bo, *next;
1746
	bool retired = false;
1747
 
1748
	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
1749
		assert(bo->rq == (void *)kgem);
1750
		assert(bo->exec == NULL);
1751
 
1752
		if (__kgem_busy(kgem, bo->handle))
1753
			break;
1754
 
1755
		__kgem_bo_clear_busy(bo);
1756
 
1757
		if (bo->refcnt)
1758
			continue;
1759
 
1760
		if (bo->snoop) {
1761
			kgem_bo_move_to_snoop(kgem, bo);
1762
		} else if (bo->scanout) {
1763
			kgem_bo_move_to_scanout(kgem, bo);
1764
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
1765
			   kgem_bo_set_purgeable(kgem, bo)) {
1766
			kgem_bo_move_to_inactive(kgem, bo);
1767
			retired = true;
1768
		} else
1769
			kgem_bo_free(kgem, bo);
1770
	}
1771
#if HAS_DEBUG_FULL
1772
	{
1773
		int count = 0;
1774
		list_for_each_entry(bo, &kgem->flushing, request)
1775
			count++;
1776
		printf("%s: %d bo on flushing list\n", __FUNCTION__, count);
1777
	}
1778
#endif
1779
 
1780
	kgem->need_retire |= !list_is_empty(&kgem->flushing);
1781
 
1782
	return retired;
1783
}
1784
 
1785
 
1786
static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
1787
{
1788
	bool retired = false;
1789
 
1790
	DBG(("%s: request %d complete\n",
1791
	     __FUNCTION__, rq->bo->handle));
1792
 
1793
	while (!list_is_empty(&rq->buffers)) {
1794
		struct kgem_bo *bo;
1795
 
1796
		bo = list_first_entry(&rq->buffers,
1797
				      struct kgem_bo,
1798
				      request);
1799
 
1800
		assert(RQ(bo->rq) == rq);
1801
		assert(bo->exec == NULL);
1802
		assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);
1803
 
1804
		list_del(&bo->request);
1805
 
1806
		if (bo->needs_flush)
1807
			bo->needs_flush = __kgem_busy(kgem, bo->handle);
1808
		if (bo->needs_flush) {
1809
			DBG(("%s: moving %d to flushing\n",
1810
			     __FUNCTION__, bo->handle));
1811
			list_add(&bo->request, &kgem->flushing);
1812
			bo->rq = (void *)kgem;
1813
			continue;
1814
		}
1815
 
1816
		bo->domain = DOMAIN_NONE;
1817
		bo->rq = NULL;
1818
		if (bo->refcnt)
1819
			continue;
1820
 
1821
		if (bo->snoop) {
1822
			kgem_bo_move_to_snoop(kgem, bo);
1823
		} else if (bo->scanout) {
1824
			kgem_bo_move_to_scanout(kgem, bo);
1825
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
1826
			   kgem_bo_set_purgeable(kgem, bo)) {
1827
			kgem_bo_move_to_inactive(kgem, bo);
1828
			retired = true;
1829
		} else {
1830
			DBG(("%s: closing %d\n",
1831
			     __FUNCTION__, bo->handle));
1832
			kgem_bo_free(kgem, bo);
1833
		}
1834
	}
1835
 
1836
	assert(rq->bo->rq == NULL);
1837
	assert(list_is_empty(&rq->bo->request));
1838
 
1839
	if (--rq->bo->refcnt == 0) {
1840
		if (kgem_bo_set_purgeable(kgem, rq->bo)) {
1841
			kgem_bo_move_to_inactive(kgem, rq->bo);
1842
			retired = true;
1843
		} else {
1844
			DBG(("%s: closing %d\n",
1845
			     __FUNCTION__, rq->bo->handle));
1846
			kgem_bo_free(kgem, rq->bo);
1847
		}
1848
	}
1849
 
1850
	__kgem_request_free(rq);
1851
	return retired;
1852
}
1853
 
1854
static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
1855
{
1856
	bool retired = false;
1857
 
1858
	while (!list_is_empty(&kgem->requests[ring])) {
1859
		struct kgem_request *rq;
1860
 
1861
		rq = list_first_entry(&kgem->requests[ring],
1862
				      struct kgem_request,
1863
				      list);
1864
		if (__kgem_busy(kgem, rq->bo->handle))
1865
			break;
1866
 
1867
		retired |= __kgem_retire_rq(kgem, rq);
1868
	}
1869
 
1870
#if HAS_DEBUG_FULL
1871
	{
1872
		struct kgem_bo *bo;
1873
		int count = 0;
1874
 
1875
		list_for_each_entry(bo, &kgem->requests[ring], request)
1876
			count++;
1877
 
1878
		bo = NULL;
1879
		if (!list_is_empty(&kgem->requests[ring]))
1880
			bo = list_first_entry(&kgem->requests[ring],
1881
					      struct kgem_request,
1882
					      list)->bo;
1883
 
1884
		printf("%s: ring=%d, %d outstanding requests, oldest=%d\n",
1885
		       __FUNCTION__, ring, count, bo ? bo->handle : 0);
1886
	}
1887
#endif
1888
 
1889
	return retired;
1890
}
1891
 
1892
static bool kgem_retire__requests(struct kgem *kgem)
1893
{
1894
	bool retired = false;
1895
	int n;
1896
 
1897
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
1898
		retired |= kgem_retire__requests_ring(kgem, n);
1899
		kgem->need_retire |= !list_is_empty(&kgem->requests[n]);
1900
	}
1901
 
1902
	return retired;
1903
}
1904
 
1905
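/* Top-level retire pass: scan the flushing list, every ring's request list
 * and the active upload buffers, releasing whatever the GPU has finished
 * with. Returns true if any buffer was returned to the inactive cache.
 */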
bool kgem_retire(struct kgem *kgem)
1906
{
1907
	bool retired = false;
1908
 
1909
	DBG(("%s\n", __FUNCTION__));
1910
 
1911
	kgem->need_retire = false;
1912
 
1913
	retired |= kgem_retire__flushing(kgem);
1914
	retired |= kgem_retire__requests(kgem);
1915
	retired |= kgem_retire__buffers(kgem);
1916
 
1917
	DBG(("%s -- retired=%d, need_retire=%d\n",
1918
	     __FUNCTION__, retired, kgem->need_retire));
1919
 
1920
	kgem->retire(kgem);
1921
 
1922
	return retired;
1923
}
1924
 
3263 Serge 1925
bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
1926
{
1927
	struct kgem_request *rq;
3258 Serge 1928
 
3263 Serge 1929
	assert(!list_is_empty(&kgem->requests[ring]));
3258 Serge 1930
 
3263 Serge 1931
	rq = list_last_entry(&kgem->requests[ring],
1932
			     struct kgem_request, list);
1933
	if (__kgem_busy(kgem, rq->bo->handle)) {
1934
		DBG(("%s: last requests handle=%d still busy\n",
1935
		     __FUNCTION__, rq->bo->handle));
1936
		return false;
1937
	}
3258 Serge 1938
 
3263 Serge 1939
	DBG(("%s: ring=%d idle (handle=%d)\n",
1940
	     __FUNCTION__, ring, rq->bo->handle));
3258 Serge 1941
 
3263 Serge 1942
	kgem_retire__requests_ring(kgem, ring);
1943
	assert(list_is_empty(&kgem->requests[ring]));
1944
	return true;
1945
}
3258 Serge 1946
 
1947
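/* After a successful execbuffer: record each buffer's presumed offset,
 * detach it from the exec list and hand ownership to the request, which is
 * then queued on its ring. The static fallback request instead blocks on a
 * set-domain ioctl until the batch completes, then retires and cleans the
 * caches.
 */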
static void kgem_commit(struct kgem *kgem)
1948
{
1949
	struct kgem_request *rq = kgem->next_request;
1950
	struct kgem_bo *bo, *next;
1951
 
1952
	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
1953
		assert(next->request.prev == &bo->request);
1954
 
1955
		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n",
1956
		     __FUNCTION__, bo->handle, bo->proxy != NULL,
1957
		     bo->dirty, bo->needs_flush, bo->snoop,
1958
		     (unsigned)bo->exec->offset));
1959
 
1960
		assert(!bo->purged);
1961
		assert(bo->exec);
1962
		assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec);
1963
		assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));
1964
 
1965
		bo->presumed_offset = bo->exec->offset;
1966
		bo->exec = NULL;
1967
		bo->target_handle = -1;
1968
 
1969
		if (!bo->refcnt && !bo->reusable) {
1970
			assert(!bo->snoop);
1971
			kgem_bo_free(kgem, bo);
1972
			continue;
1973
		}
1974
 
1975
		bo->binding.offset = 0;
1976
		bo->domain = DOMAIN_GPU;
1977
		bo->dirty = false;
1978
 
1979
		if (bo->proxy) {
1980
			/* proxies are not used for domain tracking */
1981
			bo->exec = NULL;
1982
			__kgem_bo_clear_busy(bo);
1983
		}
1984
 
1985
		kgem->scanout_busy |= bo->scanout;
1986
	}
1987
 
1988
	if (rq == &kgem->static_request) {
1989
		struct drm_i915_gem_set_domain set_domain;
1990
 
1991
		DBG(("%s: syncing due to allocation failure\n", __FUNCTION__));
1992
 
1993
		VG_CLEAR(set_domain);
1994
		set_domain.handle = rq->bo->handle;
1995
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1996
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1997
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
1998
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
1999
			kgem_throttle(kgem);
2000
		}
2001
 
2002
		kgem_retire(kgem);
2003
		assert(list_is_empty(&rq->buffers));
2004
 
2005
		gem_close(kgem->fd, rq->bo->handle);
2006
		kgem_cleanup_cache(kgem);
2007
	} else {
2008
		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
2009
		kgem->need_throttle = kgem->need_retire = 1;
2010
	}
2011
 
2012
	kgem->next_request = NULL;
2013
}
2014
 
2015
static void kgem_close_list(struct kgem *kgem, struct list *head)
2016
{
2017
	while (!list_is_empty(head))
2018
		kgem_bo_free(kgem, list_first_entry(head, struct kgem_bo, list));
2019
}
2020
 
2021
static void kgem_close_inactive(struct kgem *kgem)
2022
{
2023
	unsigned int i;
2024
 
2025
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
2026
		kgem_close_list(kgem, &kgem->inactive[i]);
2027
}
2028
 
2029
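/* Flush the partially filled upload buffers before submission. Mmapped
 * buffers with spare space are kept on active_buffers for reuse; write
 * buffers that are less than half full are shrunk into a smaller snooped
 * or linear bo (patching the relocations to point at the replacement), and
 * otherwise their staging memory is written into the bo with gem_write.
 */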
static void kgem_finish_buffers(struct kgem *kgem)
2030
{
2031
	struct kgem_buffer *bo, *next;
2032
 
2033
	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
2034
		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
2035
		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
2036
		     bo->write, bo->mmapped));
2037
 
2038
		assert(next->base.list.prev == &bo->base.list);
2039
		assert(bo->base.io);
2040
		assert(bo->base.refcnt >= 1);
2041
 
2042
		if (!bo->base.exec) {
2043
			DBG(("%s: skipping unattached handle=%d, used=%d\n",
2044
			     __FUNCTION__, bo->base.handle, bo->used));
2045
			continue;
2046
		}
2047
 
2048
		if (!bo->write) {
2049
			assert(bo->base.exec || bo->base.refcnt > 1);
2050
			goto decouple;
2051
		}
2052
 
2053
		if (bo->mmapped) {
2054
			int used;
2055
 
2056
			assert(!bo->need_io);
2057
 
2058
			used = ALIGN(bo->used, PAGE_SIZE);
2059
			if (!DBG_NO_UPLOAD_ACTIVE &&
2060
			    used + PAGE_SIZE <= bytes(&bo->base) &&
2061
			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) {
2062
				DBG(("%s: retaining upload buffer (%d/%d)\n",
2063
				     __FUNCTION__, bo->used, bytes(&bo->base)));
2064
				bo->used = used;
2065
				list_move(&bo->base.list,
2066
					  &kgem->active_buffers);
2067
				continue;
2068
			}
2069
			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
2070
			     __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map)));
2071
			goto decouple;
2072
		}
2073
 
2074
		if (!bo->used) {
2075
			/* Unless we replace the handle in the execbuffer,
2076
			 * this bo will become active. So decouple it
2077
			 * from the buffer list and track it in the normal
2078
			 * manner.
2079
			 */
2080
			goto decouple;
2081
		}
2082
 
2083
		assert(bo->need_io);
2084
		assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
2085
		assert(bo->base.domain != DOMAIN_GPU);
2086
 
2087
		if (bo->base.refcnt == 1 &&
2088
		    bo->base.size.pages.count > 1 &&
2089
		    bo->used < bytes(&bo->base) / 2) {
2090
			struct kgem_bo *shrink;
2091
			unsigned alloc = NUM_PAGES(bo->used);
2092
 
2093
			shrink = search_snoop_cache(kgem, alloc,
2094
						    CREATE_INACTIVE | CREATE_NO_RETIRE);
2095
			if (shrink) {
2096
				void *map;
2097
				int n;
2098
 
2099
				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
2100
				     __FUNCTION__,
2101
				     bo->used, bytes(&bo->base), bytes(shrink),
2102
				     bo->base.handle, shrink->handle));
2103
 
2104
				assert(bo->used <= bytes(shrink));
2105
				map = kgem_bo_map__cpu(kgem, shrink);
2106
				if (map) {
2107
					kgem_bo_sync__cpu(kgem, shrink);
2108
					memcpy(map, bo->mem, bo->used);
2109
 
2110
					shrink->target_handle =
2111
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
2112
					for (n = 0; n < kgem->nreloc; n++) {
2113
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
2114
							kgem->reloc[n].target_handle = shrink->target_handle;
2115
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
2116
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
2117
								kgem->reloc[n].delta + shrink->presumed_offset;
2118
						}
2119
					}
2120
 
2121
					bo->base.exec->handle = shrink->handle;
2122
					bo->base.exec->offset = shrink->presumed_offset;
2123
					shrink->exec = bo->base.exec;
2124
					shrink->rq = bo->base.rq;
2125
					list_replace(&bo->base.request,
2126
						     &shrink->request);
2127
					list_init(&bo->base.request);
2128
					shrink->needs_flush = bo->base.dirty;
2129
 
2130
					bo->base.exec = NULL;
2131
					bo->base.rq = NULL;
2132
					bo->base.dirty = false;
2133
					bo->base.needs_flush = false;
2134
					bo->used = 0;
2135
 
2136
					goto decouple;
2137
				}
2138
 
2139
				__kgem_bo_destroy(kgem, shrink);
2140
			}
2141
 
2142
			shrink = search_linear_cache(kgem, alloc,
2143
						     CREATE_INACTIVE | CREATE_NO_RETIRE);
2144
			if (shrink) {
2145
				int n;
2146
 
2147
				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
2148
				     __FUNCTION__,
2149
				     bo->used, bytes(&bo->base), bytes(shrink),
2150
				     bo->base.handle, shrink->handle));
2151
 
2152
				assert(bo->used <= bytes(shrink));
2153
				if (gem_write(kgem->fd, shrink->handle,
2154
					      0, bo->used, bo->mem) == 0) {
2155
					shrink->target_handle =
2156
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
2157
					for (n = 0; n < kgem->nreloc; n++) {
2158
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
2159
							kgem->reloc[n].target_handle = shrink->target_handle;
2160
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
2161
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
2162
								kgem->reloc[n].delta + shrink->presumed_offset;
2163
						}
2164
					}
2165
 
2166
					bo->base.exec->handle = shrink->handle;
2167
					bo->base.exec->offset = shrink->presumed_offset;
2168
					shrink->exec = bo->base.exec;
2169
					shrink->rq = bo->base.rq;
2170
					list_replace(&bo->base.request,
2171
						     &shrink->request);
2172
					list_init(&bo->base.request);
2173
					shrink->needs_flush = bo->base.dirty;
2174
 
2175
					bo->base.exec = NULL;
2176
					bo->base.rq = NULL;
2177
					bo->base.dirty = false;
2178
					bo->base.needs_flush = false;
2179
					bo->used = 0;
2180
 
2181
					goto decouple;
2182
				}
2183
 
2184
				__kgem_bo_destroy(kgem, shrink);
2185
			}
2186
		}
2187
 
2188
		DBG(("%s: handle=%d, uploading %d/%d\n",
2189
		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
2190
		ASSERT_IDLE(kgem, bo->base.handle);
2191
		assert(bo->used <= bytes(&bo->base));
2192
		gem_write(kgem->fd, bo->base.handle,
2193
			  0, bo->used, bo->mem);
2194
		bo->need_io = 0;
2195
 
2196
decouple:
2197
		DBG(("%s: releasing handle=%d\n",
2198
		     __FUNCTION__, bo->base.handle));
2199
		list_del(&bo->base.list);
2200
		kgem_bo_unref(kgem, &bo->base);
2201
	}
2202
}
2203
 
2204
static void kgem_cleanup(struct kgem *kgem)
2205
{
2206
	int n;
2207
 
2208
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
2209
		while (!list_is_empty(&kgem->requests[n])) {
2210
			struct kgem_request *rq;
2211
 
2212
			rq = list_first_entry(&kgem->requests[n],
2213
					      struct kgem_request,
2214
					      list);
2215
			while (!list_is_empty(&rq->buffers)) {
2216
				struct kgem_bo *bo;
2217
 
2218
				bo = list_first_entry(&rq->buffers,
2219
						      struct kgem_bo,
2220
						      request);
2221
 
2222
				bo->exec = NULL;
2223
				bo->dirty = false;
2224
				__kgem_bo_clear_busy(bo);
2225
				if (bo->refcnt == 0)
2226
					kgem_bo_free(kgem, bo);
2227
			}
2228
 
2229
			__kgem_request_free(rq);
2230
		}
2231
	}
2232
 
2233
	kgem_close_inactive(kgem);
2234
}
2235
 
2236
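/* Upload the batch contents. Commands grow upwards from index 0 (nbatch)
 * while surface state is allocated downwards from the end of the buffer
 * (kgem->surface): if no surface state was emitted only the commands are
 * written, a single write of the whole buffer suffices when the two
 * regions share a page, and two writes are used when they are disjoint.
 */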
static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
2237
{
2238
	int ret;
2239
 
2240
	ASSERT_IDLE(kgem, handle);
2241
 
2242
	/* If there is no surface data, just upload the batch */
2243
	if (kgem->surface == kgem->batch_size)
2244
		return gem_write(kgem->fd, handle,
2245
				 0, sizeof(uint32_t)*kgem->nbatch,
2246
				 kgem->batch);
2247
 
2248
	/* Are the batch pages conjoint with the surface pages? */
2249
	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
2250
		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
2251
		return gem_write(kgem->fd, handle,
2252
				 0, kgem->batch_size*sizeof(uint32_t),
2253
				 kgem->batch);
2254
	}
2255
 
2256
	/* Disjoint surface/batch, upload separately */
2257
	ret = gem_write(kgem->fd, handle,
2258
			0, sizeof(uint32_t)*kgem->nbatch,
2259
			kgem->batch);
2260
	if (ret)
2261
		return ret;
2262
 
2263
	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
2264
	ret -= sizeof(uint32_t) * kgem->surface;
2265
	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
2266
	return __gem_write(kgem->fd, handle,
2267
			size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
2268
			kgem->batch + kgem->surface);
2269
}
2270
 
2271
void kgem_reset(struct kgem *kgem)
2272
{
2273
	if (kgem->next_request) {
2274
		struct kgem_request *rq = kgem->next_request;
2275
 
2276
		while (!list_is_empty(&rq->buffers)) {
2277
			struct kgem_bo *bo =
2278
				list_first_entry(&rq->buffers,
2279
						 struct kgem_bo,
2280
						 request);
2281
			list_del(&bo->request);
2282
 
2283
			assert(RQ(bo->rq) == rq);
2284
 
2285
			bo->binding.offset = 0;
2286
			bo->exec = NULL;
2287
			bo->target_handle = -1;
2288
			bo->dirty = false;
2289
 
2290
			if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
2291
				list_add(&bo->request, &kgem->flushing);
2292
				bo->rq = (void *)kgem;
2293
			} else
2294
				__kgem_bo_clear_busy(bo);
2295
 
2296
			if (!bo->refcnt && !bo->reusable) {
2297
				assert(!bo->snoop);
2298
				DBG(("%s: discarding handle=%d\n",
2299
				     __FUNCTION__, bo->handle));
2300
				kgem_bo_free(kgem, bo);
2301
			}
2302
		}
2303
 
2304
		if (rq != &kgem->static_request) {
2305
			list_init(&rq->list);
2306
			__kgem_request_free(rq);
2307
		}
2308
	}
2309
 
2310
	kgem->nfence = 0;
2311
	kgem->nexec = 0;
2312
	kgem->nreloc = 0;
2313
	kgem->nreloc__self = 0;
2314
	kgem->aperture = 0;
2315
	kgem->aperture_fenced = 0;
2316
	kgem->nbatch = 0;
2317
	kgem->surface = kgem->batch_size;
2318
	kgem->mode = KGEM_NONE;
2319
	kgem->flush = 0;
2320
	kgem->batch_flags = kgem->batch_flags_base;
2321
 
2322
	kgem->next_request = __kgem_request_alloc(kgem);
2323
 
2324
	kgem_sna_reset(kgem);
2325
}
2326
 
2327
static int compact_batch_surface(struct kgem *kgem)
2328
{
2329
	int size, shrink, n;
2330
 
2331
	if (!kgem->has_relaxed_delta)
2332
		return kgem->batch_size;
2333
 
2334
	/* See if we can pack the contents into one or two pages */
2335
	n = ALIGN(kgem->batch_size, 1024);
2336
	size = n - kgem->surface + kgem->nbatch;
2337
	size = ALIGN(size, 1024);
2338
 
2339
	shrink = n - size;
2340
	if (shrink) {
2341
		DBG(("shrinking from %d to %d\n", kgem->batch_size, size));
2342
 
2343
		shrink *= sizeof(uint32_t);
2344
		for (n = 0; n < kgem->nreloc; n++) {
2345
			if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
2346
			    kgem->reloc[n].target_handle == ~0U)
2347
				kgem->reloc[n].delta -= shrink;
2348
 
2349
			if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
2350
				kgem->reloc[n].offset -= shrink;
2351
		}
2352
	}
2353
 
2354
	return size * sizeof(uint32_t);
2355
}
2356
 
2357
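/* Pick a bo to hold the batch: prefer one of the pinned 4KiB/16KiB batch
 * bos if it is idle (retiring its old request when it has merely
 * completed), fall back to a blocking wait on gen2 without pinned-batch
 * support, and otherwise allocate a fresh linear bo.
 */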
static struct kgem_bo *
2358
kgem_create_batch(struct kgem *kgem, int size)
2359
{
2360
	struct drm_i915_gem_set_domain set_domain;
2361
	struct kgem_bo *bo;
2362
 
2363
	if (size <= 4096) {
2364
		bo = list_first_entry(&kgem->pinned_batches[0],
2365
				      struct kgem_bo,
2366
				      list);
2367
		if (!bo->rq) {
2368
out_4096:
2369
			list_move_tail(&bo->list, &kgem->pinned_batches[0]);
2370
			return kgem_bo_reference(bo);
2371
		}
2372
 
2373
		if (!__kgem_busy(kgem, bo->handle)) {
2374
			assert(RQ(bo->rq)->bo == bo);
2375
			__kgem_retire_rq(kgem, RQ(bo->rq));
2376
			goto out_4096;
2377
		}
2378
	}
2379
 
2380
	if (size <= 16384) {
2381
		bo = list_first_entry(&kgem->pinned_batches[1],
2382
				      struct kgem_bo,
2383
				      list);
2384
		if (!bo->rq) {
2385
out_16384:
2386
			list_move_tail(&bo->list, &kgem->pinned_batches[1]);
2387
			return kgem_bo_reference(bo);
2388
		}
2389
 
2390
		if (!__kgem_busy(kgem, bo->handle)) {
2391
			assert(RQ(bo->rq)->bo == bo);
2392
			__kgem_retire_rq(kgem, RQ(bo->rq));
2393
			goto out_16384;
2394
		}
2395
	}
2396
 
2397
	if (kgem->gen == 020 && !kgem->has_pinned_batches) {
2398
		assert(size <= 16384);
2399
 
2400
		bo = list_first_entry(&kgem->pinned_batches[size > 4096],
2401
				      struct kgem_bo,
2402
				      list);
2403
		list_move_tail(&bo->list, &kgem->pinned_batches[size > 4096]);
2404
 
2405
		DBG(("%s: syncing due to busy batches\n", __FUNCTION__));
2406
 
2407
		VG_CLEAR(set_domain);
2408
		set_domain.handle = bo->handle;
2409
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
2410
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
2411
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
2412
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
2413
			kgem_throttle(kgem);
2414
			return NULL;
2415
		}
2416
 
2417
		kgem_retire(kgem);
2418
		assert(bo->rq == NULL);
2419
		return kgem_bo_reference(bo);
2420
	}
2421
 
2422
	return kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
2423
}
2424
 
2425
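/* Submit the accumulated batch: close the command stream, flush pending
 * upload buffers, allocate and write the batch bo, then fire
 * DRM_IOCTL_I915_GEM_EXECBUFFER2, retrying on EBUSY after throttling. A
 * failed submission marks the driver as wedged and the outstanding
 * requests are cleaned up; in all cases kgem_reset() prepares for the next
 * batch.
 */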
void _kgem_submit(struct kgem *kgem)
2426
{
2427
	struct kgem_request *rq;
2428
	uint32_t batch_end;
2429
	int size;
2430
 
2431
	assert(!DBG_NO_HW);
2432
	assert(!kgem->wedged);
2433
 
2434
	assert(kgem->nbatch);
2435
	assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
2436
	assert(kgem->nbatch <= kgem->surface);
2437
 
2438
	batch_end = kgem_end_batch(kgem);
2439
	kgem_sna_flush(kgem);
2440
 
2441
	DBG(("batch[%d/%d]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
2442
	     kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
2443
	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));
2444
 
2445
	assert(kgem->nbatch <= kgem->batch_size);
2446
	assert(kgem->nbatch <= kgem->surface);
2447
	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
2448
	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
2449
	assert(kgem->nfence <= kgem->fence_max);
2450
 
2451
	kgem_finish_buffers(kgem);
2452
 
2453
#if SHOW_BATCH
2454
	__kgem_batch_debug(kgem, batch_end);
2455
#endif
2456
 
2457
	rq = kgem->next_request;
2458
	if (kgem->surface != kgem->batch_size)
2459
		size = compact_batch_surface(kgem);
2460
	else
2461
		size = kgem->nbatch * sizeof(kgem->batch[0]);
2462
	rq->bo = kgem_create_batch(kgem, size);
2463
	if (rq->bo) {
2464
		uint32_t handle = rq->bo->handle;
2465
		int i;
2466
 
2467
		assert(!rq->bo->needs_flush);
2468
 
2469
		i = kgem->nexec++;
2470
		kgem->exec[i].handle = handle;
2471
		kgem->exec[i].relocation_count = kgem->nreloc;
2472
		kgem->exec[i].relocs_ptr = (uintptr_t)kgem->reloc;
2473
		kgem->exec[i].alignment = 0;
2474
		kgem->exec[i].offset = rq->bo->presumed_offset;
2475
		kgem->exec[i].flags = 0;
2476
		kgem->exec[i].rsvd1 = 0;
2477
		kgem->exec[i].rsvd2 = 0;
2478
 
2479
		rq->bo->target_handle = kgem->has_handle_lut ? i : handle;
2480
		rq->bo->exec = &kgem->exec[i];
2481
		rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
2482
		list_add(&rq->bo->request, &rq->buffers);
2483
		rq->ring = kgem->ring == KGEM_BLT;
2484
 
2485
		kgem_fixup_self_relocs(kgem, rq->bo);
2486
 
2487
		if (kgem_batch_write(kgem, handle, size) == 0) {
2488
			struct drm_i915_gem_execbuffer2 execbuf;
2489
			int ret, retry = 3;
2490
 
2491
			VG_CLEAR(execbuf);
2492
			execbuf.buffers_ptr = (uintptr_t)kgem->exec;
2493
			execbuf.buffer_count = kgem->nexec;
2494
			execbuf.batch_start_offset = 0;
2495
			execbuf.batch_len = batch_end*sizeof(uint32_t);
2496
			execbuf.cliprects_ptr = 0;
2497
			execbuf.num_cliprects = 0;
2498
			execbuf.DR1 = 0;
2499
			execbuf.DR4 = 0;
2500
			execbuf.flags = kgem->ring | kgem->batch_flags;
2501
			execbuf.rsvd1 = 0;
2502
			execbuf.rsvd2 = 0;
2503
 
2504
 
2505
 
3263 Serge 2506
			ret = drmIoctl(kgem->fd,
2507
				       DRM_IOCTL_I915_GEM_EXECBUFFER2,
2508
				       &execbuf);
2509
			while (ret == -1 && errno == EBUSY && retry--) {
2510
				__kgem_throttle(kgem);
2511
				ret = drmIoctl(kgem->fd,
2512
					       DRM_IOCTL_I915_GEM_EXECBUFFER2,
2513
					       &execbuf);
2514
			}
3258 Serge 2515
			if (DEBUG_SYNC && ret == 0) {
2516
				struct drm_i915_gem_set_domain set_domain;
2517
 
2518
				VG_CLEAR(set_domain);
2519
				set_domain.handle = handle;
2520
				set_domain.read_domains = I915_GEM_DOMAIN_GTT;
2521
				set_domain.write_domain = I915_GEM_DOMAIN_GTT;
2522
 
2523
				ret = drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
2524
			}
2525
			if (ret == -1) {
2526
//				DBG(("%s: GPU hang detected [%d]\n",
2527
//				     __FUNCTION__, errno));
2528
				kgem_throttle(kgem);
2529
				kgem->wedged = true;
2530
 
2531
#if 0
2532
				ret = errno;
2533
				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
2534
				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
2535
				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);
2536
 
2537
				for (i = 0; i < kgem->nexec; i++) {
2538
					struct kgem_bo *bo, *found = NULL;
2539
 
2540
					list_for_each_entry(bo, &kgem->next_request->buffers, request) {
2541
						if (bo->handle == kgem->exec[i].handle) {
2542
							found = bo;
2543
							break;
2544
						}
2545
					}
2546
					ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
2547
					       i,
2548
					       kgem->exec[i].handle,
2549
					       (int)kgem->exec[i].offset,
2550
					       found ? kgem_bo_size(found) : -1,
2551
					       found ? found->tiling : -1,
2552
					       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
2553
					       found ? found->snoop : -1,
2554
					       found ? found->purged : -1);
2555
				}
2556
				for (i = 0; i < kgem->nreloc; i++) {
2557
					ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n",
2558
					       i,
2559
					       (int)kgem->reloc[i].offset,
2560
					       kgem->reloc[i].target_handle,
2561
					       kgem->reloc[i].delta,
2562
					       kgem->reloc[i].read_domains,
2563
					       kgem->reloc[i].write_domain,
2564
					       (int)kgem->reloc[i].presumed_offset);
2565
				}
2566
 
2567
				if (DEBUG_SYNC) {
2568
					int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
2569
					if (fd != -1) {
2570
						write(fd, kgem->batch, batch_end*sizeof(uint32_t));
2571
						close(fd);
2572
					}
2573
 
2574
					FatalError("SNA: failed to submit batchbuffer, errno=%d\n", ret);
2575
				}
2576
#endif
2577
			}
2578
		}
2579
 
2580
		kgem_commit(kgem);
2581
	}
2582
	if (kgem->wedged)
2583
		kgem_cleanup(kgem);
2584
 
2585
	kgem_reset(kgem);
2586
 
2587
	assert(kgem->next_request != NULL);
2588
}
2589
 
2590
void kgem_throttle(struct kgem *kgem)
2591
{
2592
	kgem->need_throttle = 0;
2593
	if (kgem->wedged)
2594
		return;
2595
 
2596
	kgem->wedged = __kgem_throttle(kgem);
2597
	if (kgem->wedged) {
2598
		printf("Detected a hung GPU, disabling acceleration.\n");
2599
		printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
2600
	}
2601
}
2602
 
2603
void kgem_purge_cache(struct kgem *kgem)
2604
{
2605
	struct kgem_bo *bo, *next;
2606
	int i;
2607
 
2608
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
2609
		list_for_each_entry_safe(bo, next, &kgem->inactive[i], list) {
2610
			if (!kgem_bo_is_retained(kgem, bo)) {
2611
				DBG(("%s: purging %d\n",
2612
				     __FUNCTION__, bo->handle));
2613
				kgem_bo_free(kgem, bo);
2614
			}
2615
		}
2616
	}
2617
 
2618
	kgem->need_purge = false;
2619
}
2620
 
2621
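/* Periodic cache housekeeping: release deferred bo/request allocations,
 * drop idle scanouts and large inactive bos, then age the snoop and
 * inactive caches using the bo->delta timestamps, freeing entries that
 * have been idle for longer than MAX_INACTIVE_TIME (recently mapped bos
 * are preserved a little longer). Returns true while more work remains.
 */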
bool kgem_expire_cache(struct kgem *kgem)
2622
{
2623
	time_t now, expire;
2624
	struct kgem_bo *bo;
2625
	unsigned int size = 0, count = 0;
2626
	bool idle;
2627
	unsigned int i;
2628
 
2629
	time(&now);
2630
 
2631
	while (__kgem_freed_bo) {
2632
		bo = __kgem_freed_bo;
2633
		__kgem_freed_bo = *(struct kgem_bo **)bo;
2634
		free(bo);
2635
	}
2636
 
2637
	while (__kgem_freed_request) {
2638
		struct kgem_request *rq = __kgem_freed_request;
2639
		__kgem_freed_request = *(struct kgem_request **)rq;
2640
		free(rq);
2641
	}
2642
 
2643
	while (!list_is_empty(&kgem->large_inactive)) {
2644
		kgem_bo_free(kgem,
2645
			     list_first_entry(&kgem->large_inactive,
2646
					      struct kgem_bo, list));
2647
 
2648
	}
2649
 
2650
	while (!list_is_empty(&kgem->scanout)) {
2651
		bo = list_first_entry(&kgem->scanout, struct kgem_bo, list);
2652
		if (__kgem_busy(kgem, bo->handle))
2653
			break;
2654
 
2655
		list_del(&bo->list);
2656
		kgem_bo_clear_scanout(kgem, bo);
2657
		__kgem_bo_destroy(kgem, bo);
2658
	}
2659
 
2660
	expire = 0;
2661
	list_for_each_entry(bo, &kgem->snoop, list) {
2662
		if (bo->delta) {
2663
			expire = now - MAX_INACTIVE_TIME/2;
2664
			break;
2665
		}
2666
 
2667
		bo->delta = now;
2668
	}
2669
	if (expire) {
2670
		while (!list_is_empty(&kgem->snoop)) {
2671
			bo = list_last_entry(&kgem->snoop, struct kgem_bo, list);
2672
 
2673
			if (bo->delta > expire)
2674
				break;
2675
 
2676
			kgem_bo_free(kgem, bo);
2677
		}
2678
	}
2679
#ifdef DEBUG_MEMORY
2680
	{
2681
		long snoop_size = 0;
2682
		int snoop_count = 0;
2683
		list_for_each_entry(bo, &kgem->snoop, list)
2684
			snoop_count++, snoop_size += bytes(bo);
2685
		ErrorF("%s: still allocated %d bo, %ld bytes, in snoop cache\n",
2686
		       __FUNCTION__, snoop_count, snoop_size);
2687
	}
2688
#endif
2689
 
2690
	kgem_retire(kgem);
2691
	if (kgem->wedged)
2692
		kgem_cleanup(kgem);
2693
 
2694
	kgem->expire(kgem);
2695
 
2696
	if (kgem->need_purge)
2697
		kgem_purge_cache(kgem);
2698
 
2699
	expire = 0;
2700
 
2701
	idle = !kgem->need_retire;
2702
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
2703
		idle &= list_is_empty(&kgem->inactive[i]);
2704
		list_for_each_entry(bo, &kgem->inactive[i], list) {
2705
			if (bo->delta) {
2706
				expire = now - MAX_INACTIVE_TIME;
2707
				break;
2708
			}
2709
 
2710
			bo->delta = now;
2711
		}
2712
	}
2713
	if (idle) {
2714
		DBG(("%s: idle\n", __FUNCTION__));
2715
		kgem->need_expire = false;
2716
		return false;
2717
	}
2718
	if (expire == 0)
2719
		return true;
2720
 
2721
	idle = !kgem->need_retire;
2722
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
2723
		struct list preserve;
2724
 
2725
		list_init(&preserve);
2726
		while (!list_is_empty(&kgem->inactive[i])) {
2727
			bo = list_last_entry(&kgem->inactive[i],
2728
					     struct kgem_bo, list);
2729
 
2730
			if (bo->delta > expire) {
2731
				idle = false;
2732
				break;
2733
			}
2734
 
2735
			if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) {
2736
				idle = false;
2737
				list_move_tail(&bo->list, &preserve);
2738
			} else {
2739
				count++;
2740
				size += bytes(bo);
2741
				kgem_bo_free(kgem, bo);
2742
				DBG(("%s: expiring %d\n",
2743
				     __FUNCTION__, bo->handle));
2744
			}
2745
		}
2746
		if (!list_is_empty(&preserve)) {
2747
			preserve.prev->next = kgem->inactive[i].next;
2748
			kgem->inactive[i].next->prev = preserve.prev;
2749
			kgem->inactive[i].next = preserve.next;
2750
			preserve.next->prev = &kgem->inactive[i];
2751
		}
2752
	}
2753
 
2754
#ifdef DEBUG_MEMORY
2755
	{
2756
		long inactive_size = 0;
2757
		int inactive_count = 0;
2758
		for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
2759
			list_for_each_entry(bo, &kgem->inactive[i], list)
2760
				inactive_count++, inactive_size += bytes(bo);
2761
		ErrorF("%s: still allocated %d bo, %ld bytes, in inactive cache\n",
2762
		       __FUNCTION__, inactive_count, inactive_size);
2763
	}
2764
#endif
2765
 
2766
	DBG(("%s: expired %d objects, %d bytes, idle? %d\n",
2767
	     __FUNCTION__, count, size, idle));
2768
 
2769
	kgem->need_expire = !idle;
2770
	(void)count;
2771
	(void)size;
2772
	return !idle;
2773
}
2774
 
2775
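/* Full teardown of the caches: wait for the most recent request on every
 * ring, retire and clean up whatever is left, then free the inactive and
 * snoop caches along with any deferred bo allocations.
 */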
void kgem_cleanup_cache(struct kgem *kgem)
2776
{
2777
	unsigned int i;
2778
	int n;
2779
 
2780
	/* sync to the most recent request */
2781
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
2782
		if (!list_is_empty(&kgem->requests[n])) {
2783
			struct kgem_request *rq;
2784
			struct drm_i915_gem_set_domain set_domain;
2785
 
2786
			rq = list_first_entry(&kgem->requests[n],
2787
					      struct kgem_request,
2788
					      list);
2789
 
2790
			DBG(("%s: sync on cleanup\n", __FUNCTION__));
2791
 
2792
			VG_CLEAR(set_domain);
2793
			set_domain.handle = rq->bo->handle;
2794
			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
2795
			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
2796
			(void)drmIoctl(kgem->fd,
2797
				       DRM_IOCTL_I915_GEM_SET_DOMAIN,
2798
				       &set_domain);
2799
		}
2800
	}
2801
 
2802
	kgem_retire(kgem);
2803
	kgem_cleanup(kgem);
2804
 
2805
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
2806
		while (!list_is_empty(&kgem->inactive[i]))
2807
			kgem_bo_free(kgem,
2808
				     list_last_entry(&kgem->inactive[i],
2809
						     struct kgem_bo, list));
2810
	}
2811
 
2812
	while (!list_is_empty(&kgem->snoop))
2813
		kgem_bo_free(kgem,
2814
			     list_last_entry(&kgem->snoop,
2815
					     struct kgem_bo, list));
2816
 
2817
	while (__kgem_freed_bo) {
2818
		struct kgem_bo *bo = __kgem_freed_bo;
2819
		__kgem_freed_bo = *(struct kgem_bo **)bo;
2820
		free(bo);
2821
	}
2822
 
2823
	kgem->need_purge = false;
2824
	kgem->need_expire = false;
2825
}
2826
 
2827
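/* Look for a reusable untiled bo of at least num_pages in the inactive
 * cache (or, when CREATE_INACTIVE is not set, the active cache),
 * preferring one that already carries the requested CPU/GTT mapping.
 * Tiling and purgeable state are cleared before the bo is returned, and a
 * near-miss candidate is kept as a fallback.
 */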
static struct kgem_bo *
3256 Serge 2828
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
2829
{
2830
	struct kgem_bo *bo, *first = NULL;
2831
	bool use_active = (flags & CREATE_INACTIVE) == 0;
2832
	struct list *cache;
2833
 
2834
	DBG(("%s: num_pages=%d, flags=%x, use_active? %d\n",
2835
	     __FUNCTION__, num_pages, flags, use_active));
2836
 
2837
	if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
2838
		return NULL;
2839
 
2840
	if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
2841
		DBG(("%s: inactive and cache bucket empty\n",
2842
		     __FUNCTION__));
2843
 
2844
		if (flags & CREATE_NO_RETIRE) {
2845
			DBG(("%s: can not retire\n", __FUNCTION__));
2846
			return NULL;
2847
		}
2848
 
2849
		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) {
2850
			DBG(("%s: active cache bucket empty\n", __FUNCTION__));
2851
			return NULL;
2852
		}
2853
 
2854
		if (!__kgem_throttle_retire(kgem, flags)) {
2855
			DBG(("%s: nothing retired\n", __FUNCTION__));
2856
			return NULL;
2857
		}
2858
 
2859
		if (list_is_empty(inactive(kgem, num_pages))) {
2860
			DBG(("%s: active cache bucket still empty after retire\n",
2861
			     __FUNCTION__));
2862
			return NULL;
2863
		}
2864
	}
2865
 
2866
	if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
2867
		int for_cpu = !!(flags & CREATE_CPU_MAP);
2868
		DBG(("%s: searching for inactive %s map\n",
2869
		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
2870
		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
2871
		list_for_each_entry(bo, cache, vma) {
2872
			assert(IS_CPU_MAP(bo->map) == for_cpu);
2873
			assert(bucket(bo) == cache_bucket(num_pages));
2874
			assert(bo->proxy == NULL);
2875
			assert(bo->rq == NULL);
2876
			assert(bo->exec == NULL);
2877
			assert(!bo->scanout);
2878
 
2879
			if (num_pages > num_pages(bo)) {
2880
				DBG(("inactive too small: %d < %d\n",
2881
				     num_pages(bo), num_pages));
2882
				continue;
2883
			}
2884
 
2885
			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
2886
				kgem_bo_free(kgem, bo);
2887
				break;
2888
			}
2889
 
2890
			if (I915_TILING_NONE != bo->tiling &&
2891
			    !gem_set_tiling(kgem->fd, bo->handle,
2892
					    I915_TILING_NONE, 0))
2893
				continue;
2894
 
2895
			kgem_bo_remove_from_inactive(kgem, bo);
2896
 
2897
			bo->tiling = I915_TILING_NONE;
2898
			bo->pitch = 0;
2899
			bo->delta = 0;
2900
			DBG(("  %s: found handle=%d (num_pages=%d) in linear vma cache\n",
2901
			     __FUNCTION__, bo->handle, num_pages(bo)));
2902
			assert(use_active || bo->domain != DOMAIN_GPU);
2903
			assert(!bo->needs_flush);
2904
			ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
2905
			return bo;
2906
		}
2907
 
2908
		if (flags & CREATE_EXACT)
2909
			return NULL;
2910
 
2911
		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
2912
			return NULL;
2913
	}
2914
 
2915
	cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages);
2916
	list_for_each_entry(bo, cache, list) {
2917
		assert(bo->refcnt == 0);
2918
		assert(bo->reusable);
2919
		assert(!!bo->rq == !!use_active);
2920
		assert(bo->proxy == NULL);
2921
		assert(!bo->scanout);
2922
 
2923
		if (num_pages > num_pages(bo))
2924
			continue;
2925
 
2926
		if (use_active &&
2927
		    kgem->gen <= 040 &&
2928
		    bo->tiling != I915_TILING_NONE)
2929
			continue;
2930
 
2931
		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
2932
			kgem_bo_free(kgem, bo);
2933
			break;
2934
		}
2935
 
2936
		if (I915_TILING_NONE != bo->tiling) {
2937
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP))
2938
				continue;
2939
 
2940
			if (first)
2941
				continue;
2942
 
2943
			if (!gem_set_tiling(kgem->fd, bo->handle,
2944
					    I915_TILING_NONE, 0))
2945
				continue;
2946
 
2947
			bo->tiling = I915_TILING_NONE;
2948
			bo->pitch = 0;
2949
		}
2950
 
2951
		if (bo->map) {
2952
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
2953
				int for_cpu = !!(flags & CREATE_CPU_MAP);
2954
				if (IS_CPU_MAP(bo->map) != for_cpu) {
2955
					if (first != NULL)
2956
						break;
2957
 
2958
					first = bo;
2959
					continue;
2960
				}
2961
			} else {
2962
				if (first != NULL)
2963
					break;
2964
 
2965
				first = bo;
2966
				continue;
2967
			}
2968
		} else {
2969
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
2970
				if (first != NULL)
2971
					break;
2972
 
2973
				first = bo;
2974
				continue;
2975
			}
2976
		}
2977
 
2978
		if (use_active)
2979
			kgem_bo_remove_from_active(kgem, bo);
2980
		else
2981
			kgem_bo_remove_from_inactive(kgem, bo);
2982
 
2983
		assert(bo->tiling == I915_TILING_NONE);
2984
		bo->pitch = 0;
2985
		bo->delta = 0;
2986
		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
2987
		     __FUNCTION__, bo->handle, num_pages(bo),
2988
		     use_active ? "active" : "inactive"));
2989
		assert(list_is_empty(&bo->list));
2990
		assert(use_active || bo->domain != DOMAIN_GPU);
2991
		assert(!bo->needs_flush || use_active);
2992
		ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
2993
		return bo;
2994
	}
2995
 
2996
	if (first) {
2997
		assert(first->tiling == I915_TILING_NONE);
2998
 
2999
		if (use_active)
3000
			kgem_bo_remove_from_active(kgem, first);
3001
		else
3002
			kgem_bo_remove_from_inactive(kgem, first);
3003
 
3004
		first->pitch = 0;
3005
		first->delta = 0;
3006
		DBG(("  %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
3007
		     __FUNCTION__, first->handle, num_pages(first),
3008
		     use_active ? "active" : "inactive"));
3009
		assert(list_is_empty(&first->list));
3010
		assert(use_active || first->domain != DOMAIN_GPU);
3011
		assert(!first->needs_flush || use_active);
3012
		ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active);
3013
		return first;
3014
	}
3015
 
3016
	return NULL;
3017
}
3018
 
3019
 
3020
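/* Allocate a linear (untiled) bo of at least `size` bytes, reusing an
 * inactive cache entry when possible and otherwise creating a new GEM
 * object rounded up to whole pages.
 *
 * Illustrative call (a sketch of the usual pattern, not taken from this
 * file):
 *
 *	bo = kgem_create_linear(kgem, 64*1024, CREATE_GTT_MAP);
 *	if (bo == NULL)
 *		return false;
 */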
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
3021
{
3022
	struct kgem_bo *bo;
3023
	uint32_t handle;
3024
 
3025
	DBG(("%s(%d)\n", __FUNCTION__, size));
3026
 
3027
	if (flags & CREATE_GTT_MAP && kgem->has_llc) {
3028
		flags &= ~CREATE_GTT_MAP;
3029
		flags |= CREATE_CPU_MAP;
3030
	}
3031
 
3032
	size = (size + PAGE_SIZE - 1) / PAGE_SIZE;
3033
	bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags);
3034
	if (bo) {
3035
		assert(bo->domain != DOMAIN_GPU);
3036
		ASSERT_IDLE(kgem, bo->handle);
3037
		bo->refcnt = 1;
3038
		return bo;
3039
	}
3040
 
3041
	if (flags & CREATE_CACHED)
3042
		return NULL;
3043
 
3044
	handle = gem_create(kgem->fd, size);
3045
	if (handle == 0)
3046
		return NULL;
3047
 
3048
	DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size));
3049
	bo = __kgem_bo_alloc(handle, size);
3050
	if (bo == NULL) {
3051
		gem_close(kgem->fd, handle);
3052
		return NULL;
3053
	}
3054
 
3055
	debug_alloc__bo(kgem, bo);
3056
	return bo;
3057
}
3058
 
3258 Serge 3059
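/* Fence regions on pre-gen4 hardware must be a power-of-two size, at least
 * 512KiB on gen2 and 1MiB on gen3, so round the object size up accordingly
 * when accounting for fenced aperture usage.
 */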
inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
3060
{
3061
	unsigned int size;
3256 Serge 3062
 
3258 Serge 3063
	assert(bo->tiling);
3064
	assert(kgem->gen < 040);
3256 Serge 3065
 
3258 Serge 3066
	if (kgem->gen < 030)
3067
		size = 512 * 1024;
3068
	else
3069
		size = 1024 * 1024;
3070
	while (size < bytes(bo))
3071
		size *= 2;
3256 Serge 3072
 
3258 Serge 3073
	return size;
3074
}
3256 Serge 3075
 
3258 Serge 3076
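/* The main 2D allocator. After computing the surface size and pitch, try
 * in turn: the scanout list, the large active/inactive lists, inactive bos
 * that already have a suitable mapping, the active caches (exact tiling
 * first, then a near-miss), and finally the inactive cache, adjusting
 * tiling with set_tiling where allowed, before creating a brand new
 * object.
 *
 * Typical call (illustrative only):
 *
 *	bo = kgem_create_2d(kgem, width, height, 32,
 *			    I915_TILING_X, CREATE_INACTIVE);
 */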
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
3077
			       int width,
3078
			       int height,
3079
			       int bpp,
3080
			       int tiling,
3081
			       uint32_t flags)
3082
{
3083
	struct list *cache;
3084
	struct kgem_bo *bo;
3085
	uint32_t pitch, untiled_pitch, tiled_height, size;
3086
	uint32_t handle;
3087
	int i, bucket, retry;
3088
 
3089
	if (tiling < 0)
3090
		tiling = -tiling, flags |= CREATE_EXACT;
3091
 
3092
	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
3093
	     width, height, bpp, tiling,
3094
	     !!(flags & CREATE_EXACT),
3095
	     !!(flags & CREATE_INACTIVE),
3096
	     !!(flags & CREATE_CPU_MAP),
3097
	     !!(flags & CREATE_GTT_MAP),
3098
	     !!(flags & CREATE_SCANOUT),
3099
	     !!(flags & CREATE_PRIME),
3100
	     !!(flags & CREATE_TEMPORARY)));
3101
 
3102
	size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
3103
				 width, height, bpp, tiling, &pitch);
3104
	assert(size && size <= kgem->max_object_size);
3105
	size /= PAGE_SIZE;
3106
	bucket = cache_bucket(size);
3107
 
3108
	if (flags & CREATE_SCANOUT) {
3109
		assert((flags & CREATE_INACTIVE) == 0);
3110
		list_for_each_entry_reverse(bo, &kgem->scanout, list) {
3111
			assert(bo->scanout);
3112
			assert(bo->delta);
3113
			assert(!bo->purged);
3114
 
3115
			if (size > num_pages(bo) || num_pages(bo) > 2*size)
3116
				continue;
3117
 
3118
			if (bo->tiling != tiling ||
3119
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3120
				if (!gem_set_tiling(kgem->fd, bo->handle,
3121
						    tiling, pitch))
3122
					continue;
3123
 
3124
				bo->tiling = tiling;
3125
				bo->pitch = pitch;
3126
			}
3127
 
3128
			list_del(&bo->list);
3129
 
3130
			bo->unique_id = kgem_get_unique_id(kgem);
3131
			DBG(("  1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3132
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3133
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3134
			bo->refcnt = 1;
3135
			return bo;
3136
		}
3137
	}
3138
 
3139
	if (bucket >= NUM_CACHE_BUCKETS) {
3140
		DBG(("%s: large bo num pages=%d, bucket=%d\n",
3141
		     __FUNCTION__, size, bucket));
3142
 
3143
		if (flags & CREATE_INACTIVE)
3144
			goto large_inactive;
3145
 
3146
		tiled_height = kgem_aligned_height(kgem, height, tiling);
3147
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
3148
 
3149
		list_for_each_entry(bo, &kgem->large, list) {
3150
			assert(!bo->purged);
3151
			assert(!bo->scanout);
3152
			assert(bo->refcnt == 0);
3153
			assert(bo->reusable);
3154
			assert(bo->flush == true);
3155
 
3156
			if (kgem->gen < 040) {
3157
				if (bo->pitch < pitch) {
3158
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
3159
					     bo->tiling, tiling,
3160
					     bo->pitch, pitch));
3161
					continue;
3162
				}
3163
 
3164
				if (bo->pitch * tiled_height > bytes(bo))
3165
					continue;
3166
			} else {
3167
				if (num_pages(bo) < size)
3168
					continue;
3169
 
3170
				if (bo->pitch != pitch || bo->tiling != tiling) {
3171
					if (!gem_set_tiling(kgem->fd, bo->handle,
3172
							    tiling, pitch))
3173
						continue;
3174
 
3175
					bo->pitch = pitch;
3176
					bo->tiling = tiling;
3177
				}
3178
			}
3179
 
3180
			kgem_bo_remove_from_active(kgem, bo);
3181
 
3182
			bo->unique_id = kgem_get_unique_id(kgem);
3183
			bo->delta = 0;
3184
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3185
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3186
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3187
			bo->refcnt = 1;
3188
			return bo;
3189
		}
3190
 
3191
large_inactive:
3192
		list_for_each_entry(bo, &kgem->large_inactive, list) {
3193
			assert(bo->refcnt == 0);
3194
			assert(bo->reusable);
3195
			assert(!bo->scanout);
3196
 
3197
			if (size > num_pages(bo))
3198
				continue;
3199
 
3200
			if (bo->tiling != tiling ||
3201
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3202
				if (!gem_set_tiling(kgem->fd, bo->handle,
3203
						    tiling, pitch))
3204
					continue;
3205
 
3206
				bo->tiling = tiling;
3207
				bo->pitch = pitch;
3208
			}
3209
 
3210
			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
3211
				kgem_bo_free(kgem, bo);
3212
				break;
3213
			}
3214
 
3215
			list_del(&bo->list);
3216
 
3217
			bo->unique_id = kgem_get_unique_id(kgem);
3218
			bo->pitch = pitch;
3219
			bo->delta = 0;
3220
			DBG(("  1:from large inactive: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3221
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3222
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3223
			bo->refcnt = 1;
3224
			return bo;
3225
		}
3226
 
3227
		goto create;
3228
	}
3229
 
3230
	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
3231
		int for_cpu = !!(flags & CREATE_CPU_MAP);
3232
		if (kgem->has_llc && tiling == I915_TILING_NONE)
3233
			for_cpu = 1;
3234
		/* We presume that we will need to upload to this bo,
3235
		 * and so would prefer to have an active VMA.
3236
		 */
3237
		cache = &kgem->vma[for_cpu].inactive[bucket];
3238
		do {
3239
			list_for_each_entry(bo, cache, vma) {
3240
				assert(bucket(bo) == bucket);
3241
				assert(bo->refcnt == 0);
3242
				assert(!bo->scanout);
3243
				assert(bo->map);
3244
				assert(IS_CPU_MAP(bo->map) == for_cpu);
3245
				assert(bo->rq == NULL);
3246
				assert(list_is_empty(&bo->request));
3247
				assert(bo->flush == false);
3248
 
3249
				if (size > num_pages(bo)) {
3250
					DBG(("inactive too small: %d < %d\n",
3251
					     num_pages(bo), size));
3252
					continue;
3253
				}
3254
 
3255
				if (bo->tiling != tiling ||
3256
				    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3257
					DBG(("inactive vma with wrong tiling: %d < %d\n",
3258
					     bo->tiling, tiling));
3259
					continue;
3260
				}
3261
 
3262
				if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
3263
					kgem_bo_free(kgem, bo);
3264
					break;
3265
				}
3266
 
3267
				bo->pitch = pitch;
3268
				bo->delta = 0;
3269
				bo->unique_id = kgem_get_unique_id(kgem);
3270
 
3271
				kgem_bo_remove_from_inactive(kgem, bo);
3272
 
3273
				DBG(("  from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n",
3274
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3275
				assert(bo->reusable);
3276
				assert(bo->domain != DOMAIN_GPU);
3277
				ASSERT_IDLE(kgem, bo->handle);
3278
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3279
				bo->refcnt = 1;
3280
				return bo;
3281
			}
3282
		} while (!list_is_empty(cache) &&
3283
			 __kgem_throttle_retire(kgem, flags));
3284
 
3285
		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
3286
			goto create;
3287
	}
3288
 
3289
	if (flags & CREATE_INACTIVE)
3290
		goto skip_active_search;
3291
 
3292
	/* Best active match */
3293
	retry = NUM_CACHE_BUCKETS - bucket;
3294
	if (retry > 3 && (flags & CREATE_TEMPORARY) == 0)
3295
		retry = 3;
3296
search_again:
3297
	assert(bucket < NUM_CACHE_BUCKETS);
3298
	cache = &kgem->active[bucket][tiling];
3299
	if (tiling) {
3300
		tiled_height = kgem_aligned_height(kgem, height, tiling);
3301
		list_for_each_entry(bo, cache, list) {
3302
			assert(!bo->purged);
3303
			assert(bo->refcnt == 0);
3304
			assert(bucket(bo) == bucket);
3305
			assert(bo->reusable);
3306
			assert(bo->tiling == tiling);
3307
			assert(bo->flush == false);
3308
			assert(!bo->scanout);
3309
 
3310
			if (kgem->gen < 040) {
3311
				if (bo->pitch < pitch) {
3312
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
3313
					     bo->tiling, tiling,
3314
					     bo->pitch, pitch));
3315
					continue;
3316
				}
3317
 
3318
				if (bo->pitch * tiled_height > bytes(bo))
3319
					continue;
3320
			} else {
3321
				if (num_pages(bo) < size)
3322
					continue;
3323
 
3324
				if (bo->pitch != pitch) {
3325
					if (!gem_set_tiling(kgem->fd,
3326
							    bo->handle,
3327
							    tiling, pitch))
3328
						continue;
3329
 
3330
					bo->pitch = pitch;
3331
				}
3332
			}
3333
 
3334
			kgem_bo_remove_from_active(kgem, bo);
3335
 
3336
			bo->unique_id = kgem_get_unique_id(kgem);
3337
			bo->delta = 0;
3338
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3339
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3340
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3341
			bo->refcnt = 1;
3342
			return bo;
3343
		}
3344
	} else {
3345
		list_for_each_entry(bo, cache, list) {
3346
			assert(bucket(bo) == bucket);
3347
			assert(!bo->purged);
3348
			assert(bo->refcnt == 0);
3349
			assert(bo->reusable);
3350
			assert(!bo->scanout);
3351
			assert(bo->tiling == tiling);
3352
			assert(bo->flush == false);
3353
 
3354
			if (num_pages(bo) < size)
3355
				continue;
3356
 
3357
			kgem_bo_remove_from_active(kgem, bo);
3358
 
3359
			bo->pitch = pitch;
3360
			bo->unique_id = kgem_get_unique_id(kgem);
3361
			bo->delta = 0;
3362
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3363
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3364
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3365
			bo->refcnt = 1;
3366
			return bo;
3367
		}
3368
	}
3369
 
3370
	if (--retry && flags & CREATE_EXACT) {
3371
		if (kgem->gen >= 040) {
3372
			for (i = I915_TILING_NONE; i <= I915_TILING_Y; i++) {
3373
				if (i == tiling)
3374
					continue;
3375
 
3376
				cache = &kgem->active[bucket][i];
3377
				list_for_each_entry(bo, cache, list) {
3378
					assert(!bo->purged);
3379
					assert(bo->refcnt == 0);
3380
					assert(bo->reusable);
3381
					assert(!bo->scanout);
3382
					assert(bo->flush == false);
3383
 
3384
					if (num_pages(bo) < size)
3385
						continue;
3386
 
3387
					if (!gem_set_tiling(kgem->fd,
3388
							    bo->handle,
3389
							    tiling, pitch))
3390
						continue;
3391
 
3392
					kgem_bo_remove_from_active(kgem, bo);
3393
 
3394
					bo->unique_id = kgem_get_unique_id(kgem);
3395
					bo->pitch = pitch;
3396
					bo->tiling = tiling;
3397
					bo->delta = 0;
3398
					DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3399
					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3400
					assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3401
					bo->refcnt = 1;
3402
					return bo;
3403
				}
3404
			}
3405
		}
3406
 
3407
		bucket++;
3408
		goto search_again;
3409
	}
3410
 
3411
	if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
3412
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
3413
		i = tiling;
3414
		while (--i >= 0) {
3415
			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
3416
							 width, height, bpp, tiling, &pitch);
3417
			cache = active(kgem, tiled_height / PAGE_SIZE, i);
3418
			tiled_height = kgem_aligned_height(kgem, height, i);
3419
			list_for_each_entry(bo, cache, list) {
3420
				assert(!bo->purged);
3421
				assert(bo->refcnt == 0);
3422
				assert(bo->reusable);
3423
				assert(!bo->scanout);
3424
				assert(bo->flush == false);
3425
 
3426
				if (bo->tiling) {
3427
					if (bo->pitch < pitch) {
3428
						DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
3429
						     bo->tiling, tiling,
3430
						     bo->pitch, pitch));
3431
						continue;
3432
					}
3433
				} else
3434
					bo->pitch = untiled_pitch;
3435
 
3436
				if (bo->pitch * tiled_height > bytes(bo))
3437
					continue;
3438
 
3439
				kgem_bo_remove_from_active(kgem, bo);
3440
 
3441
				bo->unique_id = kgem_get_unique_id(kgem);
3442
				bo->delta = 0;
3443
				DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3444
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3445
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3446
				bo->refcnt = 1;
3447
				return bo;
3448
			}
3449
		}
3450
	}
3451
 
3452
skip_active_search:
3453
	bucket = cache_bucket(size);
3454
	retry = NUM_CACHE_BUCKETS - bucket;
3455
	if (retry > 3)
3456
		retry = 3;
3457
search_inactive:
3458
	/* Now just look for a close match and prefer any currently active */
3459
	assert(bucket < NUM_CACHE_BUCKETS);
3460
	cache = &kgem->inactive[bucket];
3461
	list_for_each_entry(bo, cache, list) {
3462
		assert(bucket(bo) == bucket);
3463
		assert(bo->reusable);
3464
		assert(!bo->scanout);
3465
		assert(bo->flush == false);
3466
 
3467
		if (size > num_pages(bo)) {
3468
			DBG(("inactive too small: %d < %d\n",
3469
			     num_pages(bo), size));
3470
			continue;
3471
		}
3472
 
3473
		if (bo->tiling != tiling ||
3474
		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3475
			if (!gem_set_tiling(kgem->fd, bo->handle,
3476
					    tiling, pitch))
3477
				continue;
3478
 
3479
			if (bo->map)
3480
				kgem_bo_release_map(kgem, bo);
3481
		}
3482
 
3483
		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
3484
			kgem_bo_free(kgem, bo);
3485
			break;
3486
		}
3487
 
3488
		kgem_bo_remove_from_inactive(kgem, bo);
3489
 
3490
		bo->pitch = pitch;
3491
		bo->tiling = tiling;
3492
 
3493
		bo->delta = 0;
3494
		bo->unique_id = kgem_get_unique_id(kgem);
3495
		assert(bo->pitch);
3496
		DBG(("  from inactive: pitch=%d, tiling=%d: handle=%d, id=%d\n",
3497
		     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3498
		assert(bo->refcnt == 0);
3499
		assert(bo->reusable);
3500
		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
3501
		ASSERT_MAYBE_IDLE(kgem, bo->handle, flags & CREATE_INACTIVE);
3502
		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3503
		bo->refcnt = 1;
3504
		return bo;
3505
	}
3506
 
3507
	if (flags & CREATE_INACTIVE &&
3508
	    !list_is_empty(&kgem->active[bucket][tiling]) &&
3509
	    __kgem_throttle_retire(kgem, flags)) {
3510
		flags &= ~CREATE_INACTIVE;
3511
		goto search_inactive;
3512
	}
3513
 
3514
	if (--retry) {
3515
		bucket++;
3516
		flags &= ~CREATE_INACTIVE;
3517
		goto search_inactive;
3518
	}
3519
 
3520
create:
3521
	if (bucket >= NUM_CACHE_BUCKETS)
3522
		size = ALIGN(size, 1024);
3523
	handle = gem_create(kgem->fd, size);
3524
	if (handle == 0)
3525
		return NULL;
3526
 
3527
	bo = __kgem_bo_alloc(handle, size);
3528
	if (!bo) {
3529
		gem_close(kgem->fd, handle);
3530
		return NULL;
3531
	}
3532
 
3533
	bo->domain = DOMAIN_CPU;
3534
	bo->unique_id = kgem_get_unique_id(kgem);
3535
	bo->pitch = pitch;
3536
	if (tiling != I915_TILING_NONE &&
3537
	    gem_set_tiling(kgem->fd, handle, tiling, pitch))
3538
		bo->tiling = tiling;
3539
	if (bucket >= NUM_CACHE_BUCKETS) {
3540
		DBG(("%s: marking large bo for automatic flushing\n",
3541
		     __FUNCTION__));
3542
		bo->flush = true;
3543
	}
3544
 
3545
	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
3546
 
3547
	debug_alloc__bo(kgem, bo);
3548
 
3549
	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
3550
	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
3551
	     size, num_pages(bo), bucket(bo)));
3552
	return bo;
3553
}
3554
 
3263 Serge 3555
#if 0
3258 Serge 3556
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
3557
				   int width,
3558
				   int height,
3559
				   int bpp,
3560
				   uint32_t flags)
3561
{
3562
	struct kgem_bo *bo;
3563
	int stride, size;
3564
 
3565
	if (DBG_NO_CPU)
3566
		return NULL;
3567
 
3568
	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));
3569
 
3570
	if (kgem->has_llc) {
3571
		bo = kgem_create_2d(kgem, width, height, bpp,
3572
				    I915_TILING_NONE, flags);
3573
		if (bo == NULL)
3574
			return bo;
3575
 
3576
		assert(bo->tiling == I915_TILING_NONE);
3577
 
3578
		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
3579
			kgem_bo_destroy(kgem, bo);
3580
			return NULL;
3581
		}
3582
 
3583
		return bo;
3584
	}
3585
 
3586
	assert(width > 0 && height > 0);
3587
	stride = ALIGN(width, 2) * bpp >> 3;
3588
	stride = ALIGN(stride, 4);
3589
	size = stride * ALIGN(height, 2);
3590
	assert(size >= PAGE_SIZE);
3591
 
3592
	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
3593
	     __FUNCTION__, width, height, bpp, stride));
3594
 
3595
	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
3596
	if (bo) {
3597
		assert(bo->tiling == I915_TILING_NONE);
3598
		assert(bo->snoop);
3599
		bo->refcnt = 1;
3600
		bo->pitch = stride;
3601
		bo->unique_id = kgem_get_unique_id(kgem);
3602
		return bo;
3603
	}
3604
 
3605
	if (kgem->has_cacheing) {
3606
		bo = kgem_create_linear(kgem, size, flags);
3607
		if (bo == NULL)
3608
			return NULL;
3609
 
3610
		assert(bo->tiling == I915_TILING_NONE);
3611
 
3612
		if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED)) {
3613
			kgem_bo_destroy(kgem, bo);
3614
			return NULL;
3615
		}
3616
		bo->snoop = true;
3617
 
3618
		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
3619
			kgem_bo_destroy(kgem, bo);
3620
			return NULL;
3621
		}
3622
 
3623
		bo->pitch = stride;
3624
		bo->unique_id = kgem_get_unique_id(kgem);
3625
		return bo;
3626
	}
3627
 
3628
	if (kgem->has_userptr) {
3629
		void *ptr;
3630
 
3631
		/* XXX */
3632
		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
3633
		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
3634
			return NULL;
3635
 
3636
		bo = kgem_create_map(kgem, ptr, size, false);
3637
		if (bo == NULL) {
3638
			free(ptr);
3639
			return NULL;
3640
		}
3641
 
3642
		bo->map = MAKE_USER_MAP(ptr);
3643
		bo->pitch = stride;
3644
		bo->unique_id = kgem_get_unique_id(kgem);
3645
		return bo;
3646
	}
3647
 
3648
		return NULL;
3649
}
3650
 
3651
 
3652
#endif
3653
 
3654
 
3655
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
3656
{
3657
	DBG(("%s: handle=%d, proxy? %d\n",
3658
	     __FUNCTION__, bo->handle, bo->proxy != NULL));
3659
 
3660
	if (bo->proxy) {
3661
		_list_del(&bo->vma);
3662
		_list_del(&bo->request);
3663
		if (bo->io && bo->exec == NULL)
3664
			_kgem_bo_delete_buffer(kgem, bo);
3665
		kgem_bo_unref(kgem, bo->proxy);
3666
		kgem_bo_binding_free(kgem, bo);
3667
		free(bo);
3668
		return;
3669
	}
3670
 
3671
	__kgem_bo_destroy(kgem, bo);
3672
}
3673
 
3263 Serge 3674
void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
3675
{
3676
	assert(bo->rq);
3677
	assert(bo->exec == NULL);
3678
	assert(bo->needs_flush);
3258 Serge 3679
 
3263 Serge 3680
	/* The kernel will emit a flush *and* update its own flushing lists. */
3681
	if (!__kgem_busy(kgem, bo->handle))
3682
		__kgem_bo_clear_busy(bo);
3258 Serge 3683
 
3263 Serge 3684
	DBG(("%s: handle=%d, busy?=%d\n",
3685
	     __FUNCTION__, bo->handle, bo->rq != NULL));
3686
}
3258 Serge 3687
 
3263 Serge 3688
inline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
3689
{
3690
	return kgem->nreloc && bo->rq && RQ_RING(bo->rq) != kgem->ring;
3691
}
3258 Serge 3692
 
3263 Serge 3693
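/* Check whether the NULL-terminated list of bos can be added to the
 * current batch without forcing a flush: no cross-ring semaphore waits,
 * enough aperture headroom below the high water mark, and a free exec slot
 * for each new buffer.
 *
 * Typical caller pattern (sketch, not taken from this file):
 *
 *	if (!kgem_check_bo(kgem, dst_bo, src_bo, NULL))
 *		_kgem_submit(kgem);
 */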
bool kgem_check_bo(struct kgem *kgem, ...)
{
	va_list ap;
	struct kgem_bo *bo;
	int num_exec = 0;
	int num_pages = 0;
	bool flush = false;

	va_start(ap, kgem);
	while ((bo = va_arg(ap, struct kgem_bo *))) {
		while (bo->proxy)
			bo = bo->proxy;
		if (bo->exec)
			continue;

		if (needs_semaphore(kgem, bo))
			return false;

		num_pages += num_pages(bo);
		num_exec++;

		flush |= bo->flush;
	}
	va_end(ap);

	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
	     __FUNCTION__, num_pages, num_exec));

	if (!num_pages)
		return true;

	if (kgem_flush(kgem, flush))
		return false;

	if (kgem->aperture > kgem->aperture_low &&
	    kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: current aperture usage (%d) is greater than low water mark (%d)\n",
		     __FUNCTION__, kgem->aperture, kgem->aperture_low));
		return false;
	}

	if (num_pages + kgem->aperture > kgem->aperture_high) {
		DBG(("%s: final aperture usage (%d) is greater than high water mark (%d)\n",
		     __FUNCTION__, num_pages + kgem->aperture, kgem->aperture_high));
		return false;
	}

	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) {
		DBG(("%s: out of exec slots (%d + %d / %d)\n", __FUNCTION__,
		     kgem->nexec, num_exec, KGEM_EXEC_SIZE(kgem)));
		return false;
	}

	return true;
}

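#if 0
/* Usage sketch (illustration only, not part of the original source):
 * callers test whether the bos needed for the next operation still fit
 * into the current batch and, if not, flush the accumulated work first.
 * _kgem_submit() is assumed to be the batch-flush entry point declared in
 * kgem.h; the bo list must be NULL-terminated to stop the va_arg loop above.
 */
static void example_blt_prologue(struct kgem *kgem,
				 struct kgem_bo *dst, struct kgem_bo *src)
{
	if (!kgem_check_bo(kgem, dst, src, NULL)) {
		_kgem_submit(kgem);	/* start a fresh batch */
		/* the caller would normally re-check or fall back here */
	}
}
#endif
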
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domain,
			uint32_t delta)
{
	int index;

	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));

	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);

	if (bo != NULL && bo->handle == -2) {
		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);

		if (read_write_domain & 0x7fff && !bo->dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}
		return 0;
	}

	index = kgem->nreloc++;
	assert(index < ARRAY_SIZE(kgem->reloc));
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
	if (bo) {
		assert(bo->refcnt);
		assert(!bo->purged);

		while (bo->proxy) {
			DBG(("%s: adding proxy [delta=%d] for handle=%d\n",
			     __FUNCTION__, bo->delta, bo->handle));
			delta += bo->delta;
			assert(bo->handle == bo->proxy->handle);
			/* need to release the cache upon batch submit */
			if (bo->exec == NULL) {
				list_move_tail(&bo->request,
					       &kgem->next_request->buffers);
				bo->rq = MAKE_REQUEST(kgem->next_request,
						      kgem->ring);
				bo->exec = &_kgem_dummy_exec;
			}

			if (read_write_domain & 0x7fff && !bo->dirty)
				__kgem_bo_mark_dirty(bo);

			bo = bo->proxy;
			assert(bo->refcnt);
			assert(!bo->purged);
		}

		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);
		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
		assert(RQ_RING(bo->rq) == kgem->ring);

		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
			if (bo->tiling &&
			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
				assert(kgem->nfence < kgem->fence_max);
				kgem->aperture_fenced +=
					kgem_bo_fenced_size(kgem, bo);
				kgem->nfence++;
			}
			bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
		}

		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = bo->target_handle;
		kgem->reloc[index].presumed_offset = bo->presumed_offset;

		if (read_write_domain & 0x7fff && !bo->dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}

		delta += bo->presumed_offset;
	} else {
		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = ~0U;
		kgem->reloc[index].presumed_offset = 0;
		if (kgem->nreloc__self < 256)
			kgem->reloc__self[kgem->nreloc__self++] = index;
	}
	kgem->reloc[index].read_domains = read_write_domain >> 16;
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;

	return delta;
}

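#if 0
/* Usage sketch (illustration only, not part of the original source): the
 * value returned by kgem_add_reloc() is the presumed GPU address plus delta
 * and is written into the batch at the same dword index passed as 'pos', so
 * the kernel can patch it if the bo moves. Read domains go in the high 16
 * bits of read_write_domain, the write domain in the low bits, optionally
 * ORed with KGEM_RELOC_FENCED for tiled targets on pre-gen4. The emission
 * helper below is hypothetical, and kgem->nbatch is assumed to be the batch
 * write index maintained elsewhere in kgem.h.
 */
static void example_emit_dst_reloc(struct kgem *kgem, struct kgem_bo *dst_bo)
{
	kgem->batch[kgem->nbatch] =
		kgem_add_reloc(kgem, kgem->nbatch, dst_bo,
			       I915_GEM_DOMAIN_RENDER << 16 |
			       I915_GEM_DOMAIN_RENDER |
			       KGEM_RELOC_FENCED,
			       0);
	kgem->nbatch++;
}
#endif
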
static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
{
	int i, j;

	DBG(("%s: type=%d, count=%d (bucket: %d)\n",
	     __FUNCTION__, type, kgem->vma[type].count, bucket));
	if (kgem->vma[type].count <= 0)
		return;

	if (kgem->need_purge)
		kgem_purge_cache(kgem);

	/* vma are limited on a per-process basis to around 64k.
	 * This includes all malloc arenas as well as other file
	 * mappings. In order to be fair and not hog the cache,
	 * and more importantly not to exhaust that limit and to
	 * start failing mappings, we keep our own number of open
	 * vma to within a conservative value.
	 */
	i = 0;
	while (kgem->vma[type].count > 0) {
		struct kgem_bo *bo = NULL;

		for (j = 0;
		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
		     j++) {
			struct list *head = &kgem->vma[type].inactive[i++%ARRAY_SIZE(kgem->vma[type].inactive)];
			if (!list_is_empty(head))
				bo = list_last_entry(head, struct kgem_bo, vma);
		}
		if (bo == NULL)
			break;

		DBG(("%s: discarding inactive %s vma cache for %d\n",
		     __FUNCTION__,
		     IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle));
		assert(IS_CPU_MAP(bo->map) == type);
		assert(bo->map);
		assert(bo->rq == NULL);

		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
//		munmap(MAP(bo->map), bytes(bo));
		bo->map = NULL;
		list_del(&bo->vma);
		kgem->vma[type].count--;

		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo)) {
			DBG(("%s: freeing unpurgeable old mapping\n",
			     __FUNCTION__));
			kgem_bo_free(kgem, bo);
		}
	}
}

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->proxy == NULL);
	assert(list_is_empty(&bo->list));
	assert(bo->exec == NULL);

	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
		DBG(("%s: converting request for GTT map into CPU map\n",
		     __FUNCTION__));
		ptr = kgem_bo_map__cpu(kgem, bo);
		kgem_bo_sync__cpu(kgem, bo);
		return ptr;
	}

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	if (bo->domain != DOMAIN_GTT) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		/* XXX use PROT_READ to avoid the write flush? */

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_GTT;
		}
	}

	return ptr;
}

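#if 0
/* Usage sketch (illustration only, not part of the original source): the
 * pointer returned by kgem_bo_map() is already synchronised to the GTT (or
 * CPU) domain, so the caller may write pixels through it directly. The
 * clear helper and its height parameter are hypothetical.
 */
static void example_clear_bo(struct kgem *kgem, struct kgem_bo *bo, int height)
{
	uint8_t *ptr = kgem_bo_map(kgem, bo);
	if (ptr != NULL)
		memset(ptr, 0, bo->pitch * height);
}
#endif
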
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->list));

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(bytes(bo) <= kgem->aperture_mappable / 4);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	return ptr;
}

void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap mmap_arg;

	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
	assert(!bo->purged);
	assert(list_is_empty(&bo->list));
	assert(!bo->scanout);
	assert(bo->proxy == NULL);

	if (IS_CPU_MAP(bo->map))
		return MAP(bo->map);

	if (bo->map)
		kgem_bo_release_map(kgem, bo);

	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));

retry:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	mmap_arg.offset = 0;
	mmap_arg.size = bytes(bo);
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
		printf("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
		       __FUNCTION__, bo->handle, bytes(bo), 0);
		if (__kgem_throttle_retire(kgem, 0))
			goto retry;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry;
		}

		return NULL;
	}

	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));

	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}

void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	kgem_bo_submit(kgem, bo);

	if (bo->domain != DOMAIN_CPU) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: SYNC: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;

		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_CPU;
		}
	}
}

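#if 0
/* Usage sketch (illustration only, not part of the original source): a CPU
 * mapping must be synchronised with kgem_bo_sync__cpu() before the CPU
 * touches it, mirroring what kgem_bo_map() does internally for LLC or
 * CPU-domain buffers. The upload helper below is hypothetical.
 */
static bool example_upload(struct kgem *kgem, struct kgem_bo *bo,
			   const void *data, size_t length)
{
	void *ptr = kgem_bo_map__cpu(kgem, bo);
	if (ptr == NULL)
		return false;

	kgem_bo_sync__cpu(kgem, bo);	/* wait for the GPU, move to CPU domain */
	memcpy(ptr, data, length);
	return true;
}
#endif
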
void kgem_clear_dirty(struct kgem *kgem)
{
	struct list * const buffers = &kgem->next_request->buffers;
	struct kgem_bo *bo;

	list_for_each_entry(bo, buffers, request) {
		if (!bo->dirty)
			break;

		bo->dirty = false;
	}
}

struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length)
{
	struct kgem_bo *bo;

	DBG(("%s: target handle=%d [proxy? %d], offset=%d, length=%d, io=%d\n",
	     __FUNCTION__, target->handle, target->proxy ? target->proxy->delta : -1,
	     offset, length, target->io));

	bo = __kgem_bo_alloc(target->handle, length);
	if (bo == NULL)
		return NULL;

	bo->unique_id = kgem_get_unique_id(kgem);
	bo->reusable = false;
	bo->size.bytes = length;

	bo->io = target->io && target->proxy == NULL;
	bo->dirty = target->dirty;
	bo->tiling = target->tiling;
	bo->pitch = target->pitch;

	assert(!bo->scanout);
	bo->proxy = kgem_bo_reference(target);
	bo->delta = offset;

	if (target->exec) {
		list_move_tail(&bo->request, &kgem->next_request->buffers);
		bo->exec = &_kgem_dummy_exec;
	}
	bo->rq = target->rq;

	return bo;
}

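#if 0
/* Usage sketch (illustration only, not part of the original source): a proxy
 * bo aliases a byte range of an existing buffer, which is how per-operation
 * views are carved out of a shared upload buffer. The one-page offset and
 * length below are arbitrary.
 */
static struct kgem_bo *example_subrange(struct kgem *kgem, struct kgem_bo *upload)
{
	/* View covering the second page of the upload buffer. */
	return kgem_create_proxy(kgem, upload, PAGE_SIZE, PAGE_SIZE);
}
#endif
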
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b && b->offset; b = b->next)
		if (format == b->format)
			return b->offset;

	return 0;
}

void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b; b = b->next) {
		if (b->offset)
			continue;

		b->offset = offset;
		b->format = format;

		if (b->next)
			b->next->offset = 0;

		return;
	}

	b = malloc(sizeof(*b));
	if (b) {
		b->next = bo->binding.next;
		b->format = format;
		b->offset = offset;
		bo->binding.next = b;
	}
}

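#if 0
/* Usage sketch (illustration only, not part of the original source): the
 * binding list caches the offset of a surface-state entry per (bo, format)
 * pair so that repeated uses of the same bo can skip re-emitting surface
 * state. emit_surface_state() is a hypothetical stand-in for the
 * gen-specific emitter.
 */
extern uint32_t emit_surface_state(struct kgem_bo *bo, uint32_t format);	/* hypothetical */

static uint32_t example_get_surface(struct kgem_bo *bo, uint32_t format)
{
	uint32_t offset = kgem_bo_get_binding(bo, format);
	if (offset == 0) {
		offset = emit_surface_state(bo, format);
		kgem_bo_set_binding(bo, format, (uint16_t)offset);
	}
	return offset;
}
#endif
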
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb)
{
	struct kgem_bo *bo;
	size_t size;
	int ret;

	ret = drmIoctl(kgem->fd, SRV_FBINFO, fb);
	if (ret != 0)
		return 0;

	size = fb->pitch * fb->height / PAGE_SIZE;

	bo = __kgem_bo_alloc(-2, size);
	if (!bo)
		return 0;

	bo->domain    = DOMAIN_GTT;
	bo->unique_id = kgem_get_unique_id(kgem);
	bo->pitch     = fb->pitch;
	bo->tiling    = I915_TILING_NONE;
	bo->scanout   = 1;
	fb->fb_bo     = bo;

//	printf("fb width %d height %d pitch %d bo %p\n",
//	       fb->width, fb->height, fb->pitch, fb->fb_bo);

	return 1;
}

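#if 0
/* Usage sketch (illustration only, not part of the original source): on
 * KolibriOS the scanout surface is not a GEM allocation but is described by
 * the SRV_FBINFO service call, so driver start-up queries the framebuffer
 * once and keeps fb->fb_bo as the permanent render target. The attach
 * helper below is hypothetical.
 */
static bool example_attach_framebuffer(struct kgem *kgem, struct sna_fb *fb)
{
	if (!kgem_init_fb(kgem, fb))
		return false;

	/* fb->fb_bo now wraps the scanout surface (handle -2, DOMAIN_GTT). */
	return fb->fb_bo != NULL;
}
#endif
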
void kgem_close_batches(struct kgem *kgem)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
		while (!list_is_empty(&kgem->pinned_batches[n])) {
			kgem_bo_destroy(kgem,
					list_first_entry(&kgem->pinned_batches[n],
							 struct kgem_bo, list));
		}
	}
}
