3254 Serge 1
/*
2
 * Copyright (c) 2011 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
 * SOFTWARE.
22
 *
23
 * Authors:
24
 *    Chris Wilson 
25
 *
26
 */
27
 
28
#ifdef HAVE_CONFIG_H
29
#include "config.h"
30
#endif
31
 
32
#include "sna.h"
33
#include "sna_reg.h"
34
 
3291 Serge 35
static inline
36
int user_free(void *mem)
37
{
38
    int  val;
39
    __asm__ __volatile__(
40
    "int $0x40"
41
    :"=a"(val)
42
    :"a"(68),"b"(12),"c"(mem));
43
    return val;
44
}
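/* user_free() above is a raw KolibriOS system call: int $0x40 enters the
 * kernel with the function number in eax and its arguments in ebx/ecx, and
 * the result comes back in eax.  A minimal sketch of the same wrapper
 * pattern is shown below; the helper name is hypothetical and only the
 * register convention is taken from the code above.
 */
#if 0
static inline int kos_syscall3(int fn, int arg1, void *arg2)
{
    int ret;
    __asm__ __volatile__(
        "int $0x40"                 /* KolibriOS system-call gate          */
        : "=a"(ret)                 /* result returned in eax              */
        : "a"(fn), "b"(arg1), "c"(arg2)
        : "memory");                /* be conservative about side effects  */
    return ret;
}
#endif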
3256 Serge 45
 
3291 Serge 46
 
47
 
3256 Serge 48
unsigned int cpu_cache_size();
49
 
50
static struct kgem_bo *
51
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
52
 
53
static struct kgem_bo *
54
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
55
 
3254 Serge 56
#define DBG_NO_HW 0
57
#define DBG_NO_TILING 1
58
#define DBG_NO_CACHE 0
59
#define DBG_NO_CACHE_LEVEL 0
60
#define DBG_NO_CPU 0
61
#define DBG_NO_USERPTR 0
62
#define DBG_NO_LLC 0
63
#define DBG_NO_SEMAPHORES 0
3256 Serge 64
#define DBG_NO_MADV 1
3254 Serge 65
#define DBG_NO_UPLOAD_CACHE 0
66
#define DBG_NO_UPLOAD_ACTIVE 0
67
#define DBG_NO_MAP_UPLOAD 0
68
#define DBG_NO_RELAXED_FENCING 0
69
#define DBG_NO_SECURE_BATCHES 0
70
#define DBG_NO_PINNED_BATCHES 0
71
#define DBG_NO_FAST_RELOC 0
72
#define DBG_NO_HANDLE_LUT 0
73
#define DBG_DUMP 0
74
 
3256 Serge 75
#ifndef DEBUG_SYNC
76
#define DEBUG_SYNC 0
77
#endif
78
 
79
#define SHOW_BATCH 0
80
 
81
#if 0
82
#define ASSERT_IDLE(kgem__, handle__) assert(!__kgem_busy(kgem__, handle__))
83
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__) assert(!(expect__) || !__kgem_busy(kgem__, handle__))
84
#else
85
#define ASSERT_IDLE(kgem__, handle__)
86
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__)
87
#endif
88
 
3255 Serge 89
/* Worst case seems to be 965gm where we cannot write within a cacheline that
90
 * is simultaneously being read by the GPU, or within the sampler
91
 * prefetch. In general, the chipsets seem to have a requirement that sampler
92
 * offsets be aligned to a cacheline (64 bytes).
93
 */
94
#define UPLOAD_ALIGNMENT 128
95
 
96
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
97
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)
98
 
3254 Serge 99
#define MAX_GTT_VMA_CACHE 512
100
#define MAX_CPU_VMA_CACHE INT16_MAX
101
#define MAP_PRESERVE_TIME 10
102
 
103
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
104
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
105
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
106
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
107
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
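/* bo->map carries its mapping type in the two low bits of the (page-aligned)
 * address: 0 = GTT map, 1 = CPU map, 3 = user-pointer map.  MAP() strips the
 * tag before the pointer is used.  A small round-trip sketch (assuming ptr
 * is at least 4-byte aligned):
 */
#if 0
void *cpu  = MAKE_CPU_MAP(ptr);               /* tag as CPU mapping      */
void *user = MAKE_USER_MAP(ptr);              /* tag as userptr mapping  */
assert(IS_USER_MAP(user) && !IS_USER_MAP(cpu));
assert(MAP(cpu) == ptr && MAP(user) == ptr);  /* tags strip cleanly      */
#endif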
108
 
109
#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))
110
 
111
#define LOCAL_I915_PARAM_HAS_BLT		        11
112
#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING	12
113
#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA	    15
114
#define LOCAL_I915_PARAM_HAS_SEMAPHORES		    20
115
#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES	    23
116
#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES	    24
117
#define LOCAL_I915_PARAM_HAS_NO_RELOC		    25
118
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT		    26
119
 
3256 Serge 120
#define LOCAL_I915_EXEC_IS_PINNED		(1<<10)
121
#define LOCAL_I915_EXEC_NO_RELOC		(1<<11)
122
#define LOCAL_I915_EXEC_HANDLE_LUT		(1<<12)
3263 Serge 123
struct local_i915_gem_userptr {
124
	uint64_t user_ptr;
125
	uint32_t user_size;
126
	uint32_t flags;
127
#define I915_USERPTR_READ_ONLY (1<<0)
128
#define I915_USERPTR_UNSYNCHRONIZED (1<<31)
129
	uint32_t handle;
130
};
131
 
3256 Serge 132
#define UNCACHED	0
133
#define SNOOPED		1
134
 
135
struct local_i915_gem_cacheing {
136
	uint32_t handle;
137
	uint32_t cacheing;
138
};
3258 Serge 139
 
140
#define LOCAL_IOCTL_I915_GEM_SET_CACHEING SRV_I915_GEM_SET_CACHEING
141
 
3263 Serge 142
struct local_fbinfo {
143
	int width;
144
	int height;
145
	int pitch;
146
	int tiling;
147
};
148
 
3258 Serge 149
struct kgem_buffer {
150
	struct kgem_bo base;
151
	void *mem;
152
	uint32_t used;
153
	uint32_t need_io : 1;
154
	uint32_t write : 2;
155
	uint32_t mmapped : 1;
156
};
157
 
3255 Serge 158
static struct kgem_bo *__kgem_freed_bo;
3256 Serge 159
static struct kgem_request *__kgem_freed_request;
3258 Serge 160
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;
3254 Serge 161
 
3258 Serge 162
static inline int bytes(struct kgem_bo *bo)
163
{
164
	return __kgem_bo_size(bo);
165
}
166
 
3255 Serge 167
#define bucket(B) (B)->size.pages.bucket
168
#define num_pages(B) (B)->size.pages.count
3254 Serge 169
 
3255 Serge 170
#ifdef DEBUG_MEMORY
171
static void debug_alloc(struct kgem *kgem, size_t size)
172
{
173
	kgem->debug_memory.bo_allocs++;
174
	kgem->debug_memory.bo_bytes += size;
175
}
176
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
177
{
178
	debug_alloc(kgem, bytes(bo));
179
}
180
#else
181
#define debug_alloc(k, b)
182
#define debug_alloc__bo(k, b)
183
#endif
184
 
3258 Serge 185
static void kgem_sna_reset(struct kgem *kgem)
186
{
187
	struct sna *sna = container_of(kgem, struct sna, kgem);
188
 
189
	sna->render.reset(sna);
190
	sna->blt_state.fill_bo = 0;
191
}
192
 
193
static void kgem_sna_flush(struct kgem *kgem)
194
{
195
	struct sna *sna = container_of(kgem, struct sna, kgem);
196
 
197
	sna->render.flush(sna);
198
 
199
//	if (sna->render.solid_cache.dirty)
200
//		sna_render_flush_solid(sna);
201
}
202
 
3256 Serge 203
static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
204
{
205
	struct drm_i915_gem_set_tiling set_tiling;
206
	int ret;
207
 
208
	if (DBG_NO_TILING)
209
		return false;
210
/*
211
	VG_CLEAR(set_tiling);
212
	do {
213
		set_tiling.handle = handle;
214
		set_tiling.tiling_mode = tiling;
215
		set_tiling.stride = stride;
216
 
217
		ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
218
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
219
*/
3263 Serge 220
	return false;//ret == 0;
3256 Serge 221
}
222
 
223
static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
224
{
225
	struct local_i915_gem_cacheing arg;
226
 
227
	VG_CLEAR(arg);
228
	arg.handle = handle;
229
	arg.cacheing = cacheing;
3258 Serge 230
	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
231
}
3256 Serge 232
 
3258 Serge 233
 
3256 Serge 234
 
235
 
236
 
237
static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
238
{
239
	if (flags & CREATE_NO_RETIRE) {
240
		DBG(("%s: not retiring per-request\n", __FUNCTION__));
241
		return false;
242
	}
243
 
244
	if (!kgem->need_retire) {
245
		DBG(("%s: nothing to retire\n", __FUNCTION__));
246
		return false;
247
	}
248
 
3258 Serge 249
	if (kgem_retire(kgem))
250
		return true;
3256 Serge 251
 
252
	if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
253
		DBG(("%s: not throttling\n", __FUNCTION__));
254
		return false;
255
	}
256
 
3258 Serge 257
	kgem_throttle(kgem);
258
	return kgem_retire(kgem);
259
}
3256 Serge 260
 
3258 Serge 261
static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
262
{
263
	struct drm_i915_gem_mmap_gtt mmap_arg;
264
	void *ptr;
265
 
266
	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
267
	     bo->handle, bytes(bo)));
268
	assert(bo->proxy == NULL);
269
 
270
retry_gtt:
271
	VG_CLEAR(mmap_arg);
272
	mmap_arg.handle = bo->handle;
273
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
274
		printf("%s: failed to retrieve GTT offset for handle=%d: %d\n",
275
		       __FUNCTION__, bo->handle, 0);
276
		(void)__kgem_throttle_retire(kgem, 0);
277
		if (kgem_expire_cache(kgem))
278
			goto retry_gtt;
279
 
280
		if (kgem->need_expire) {
281
			kgem_cleanup_cache(kgem);
282
			goto retry_gtt;
283
		}
284
 
285
		return NULL;
286
	}
287
 
288
retry_mmap:
289
//	ptr = mmap(0, bytes(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
290
//		   kgem->fd, mmap_arg.offset);
3263 Serge 291
//	if (ptr == 0) {
3258 Serge 292
		printf("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
293
		       __FUNCTION__, bo->handle, bytes(bo), 0);
3263 Serge 294
//		if (__kgem_throttle_retire(kgem, 0))
295
//			goto retry_mmap;
3258 Serge 296
 
3263 Serge 297
//		if (kgem->need_expire) {
298
//			kgem_cleanup_cache(kgem);
299
//			goto retry_mmap;
300
//		}
3258 Serge 301
 
302
		ptr = NULL;
3263 Serge 303
//	}
3258 Serge 304
 
305
	return ptr;
3256 Serge 306
}
307
 
3258 Serge 308
static int __gem_write(int fd, uint32_t handle,
309
		       int offset, int length,
310
		       const void *src)
311
{
312
	struct drm_i915_gem_pwrite pwrite;
313
 
314
	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
315
	     handle, offset, length));
316
 
317
	VG_CLEAR(pwrite);
318
	pwrite.handle = handle;
319
	pwrite.offset = offset;
320
	pwrite.size = length;
321
	pwrite.data_ptr = (uintptr_t)src;
322
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
323
}
324
 
3256 Serge 325
static int gem_write(int fd, uint32_t handle,
326
		     int offset, int length,
327
		     const void *src)
328
{
329
	struct drm_i915_gem_pwrite pwrite;
330
 
331
	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
332
	     handle, offset, length));
333
 
334
	VG_CLEAR(pwrite);
335
	pwrite.handle = handle;
336
	/* align the transfer to cachelines; fortuitously this is safe! */
337
	if ((offset | length) & 63) {
338
		pwrite.offset = offset & ~63;
339
		pwrite.size = ALIGN(offset+length, 64) - pwrite.offset;
340
		pwrite.data_ptr = (uintptr_t)src + pwrite.offset - offset;
341
	} else {
342
		pwrite.offset = offset;
343
		pwrite.size = length;
344
		pwrite.data_ptr = (uintptr_t)src;
345
	}
3258 Serge 346
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
3256 Serge 347
}
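/* Worked example of the cacheline rounding above (illustrative numbers):
 * offset=100, length=10 -> pwrite.offset = 100 & ~63 = 64,
 * pwrite.size = ALIGN(110, 64) - 64 = 64, data_ptr = src - 36, so the
 * kernel copies exactly one 64-byte cacheline that covers bytes 100..109
 * of the bo, reading the matching 64 bytes relative to src.
 */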
3258 Serge 348
 
3256 Serge 349
 
3258 Serge 350
bool __kgem_busy(struct kgem *kgem, int handle)
351
{
352
	struct drm_i915_gem_busy busy;
353
 
354
	VG_CLEAR(busy);
355
	busy.handle = handle;
356
	busy.busy = !kgem->wedged;
357
	(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
358
	DBG(("%s: handle=%d, busy=%d, wedged=%d\n",
359
	     __FUNCTION__, handle, busy.busy, kgem->wedged));
3256 Serge 360
 
3258 Serge 361
	return busy.busy;
362
}
363
 
364
static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
365
{
366
	DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
367
	     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
368
	     __kgem_busy(kgem, bo->handle)));
369
	assert(bo->exec == NULL);
370
	assert(list_is_empty(&bo->vma));
371
 
372
	if (bo->rq) {
373
		if (!__kgem_busy(kgem, bo->handle)) {
374
			__kgem_bo_clear_busy(bo);
375
			kgem_retire(kgem);
376
		}
377
	} else {
378
		assert(!bo->needs_flush);
379
		ASSERT_IDLE(kgem, bo->handle);
380
	}
381
}
382
 
3256 Serge 383
bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
384
		   const void *data, int length)
385
{
386
	assert(bo->refcnt);
387
	assert(!bo->purged);
388
	assert(bo->proxy == NULL);
389
	ASSERT_IDLE(kgem, bo->handle);
390
 
391
	assert(length <= bytes(bo));
392
	if (gem_write(kgem->fd, bo->handle, 0, length, data))
393
		return false;
394
 
395
	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
396
	if (bo->exec == NULL) {
3258 Serge 397
		kgem_bo_retire(kgem, bo);
3256 Serge 398
		bo->domain = DOMAIN_NONE;
399
	}
400
	return true;
401
}
402
 
3255 Serge 403
static uint32_t gem_create(int fd, int num_pages)
404
{
405
	struct drm_i915_gem_create create;
406
 
407
	VG_CLEAR(create);
408
	create.handle = 0;
409
	create.size = PAGE_SIZE * num_pages;
3258 Serge 410
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
3255 Serge 411
 
412
	return create.handle;
413
}
414
 
3256 Serge 415
static bool
416
kgem_bo_set_purgeable(struct kgem *kgem, struct kgem_bo *bo)
417
{
418
#if DBG_NO_MADV
419
	return true;
420
#else
421
	struct drm_i915_gem_madvise madv;
422
 
423
	assert(bo->exec == NULL);
424
	assert(!bo->purged);
425
 
426
	VG_CLEAR(madv);
427
	madv.handle = bo->handle;
428
	madv.madv = I915_MADV_DONTNEED;
429
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
430
		bo->purged = 1;
431
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
432
		return madv.retained;
433
	}
434
 
435
	return true;
436
#endif
437
}
438
 
439
static bool
440
kgem_bo_is_retained(struct kgem *kgem, struct kgem_bo *bo)
441
{
442
#if DBG_NO_MADV
443
	return true;
444
#else
445
	struct drm_i915_gem_madvise madv;
446
 
447
	if (!bo->purged)
448
		return true;
449
 
450
	VG_CLEAR(madv);
451
	madv.handle = bo->handle;
452
	madv.madv = I915_MADV_DONTNEED;
453
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0)
454
		return madv.retained;
455
 
456
	return false;
457
#endif
458
}
459
 
460
static bool
461
kgem_bo_clear_purgeable(struct kgem *kgem, struct kgem_bo *bo)
462
{
463
#if DBG_NO_MADV
464
	return true;
465
#else
466
	struct drm_i915_gem_madvise madv;
467
 
468
	assert(bo->purged);
469
 
470
	VG_CLEAR(madv);
471
	madv.handle = bo->handle;
472
	madv.madv = I915_MADV_WILLNEED;
473
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
474
		bo->purged = !madv.retained;
475
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
476
		return madv.retained;
477
	}
478
 
479
	return false;
480
#endif
481
}
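/* The three madvise helpers above implement the purgeable-cache protocol:
 * a bo entering the inactive cache is marked DONTNEED so the kernel may
 * reclaim its pages under memory pressure, and it is flipped back to
 * WILLNEED before reuse.  A sketch of the reuse-side check (mirroring how
 * the cache lookups later in this file use it):
 */
#if 0
if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
	/* the kernel dropped the pages while the bo sat in the cache */
	kgem_bo_free(kgem, bo);
} else {
	/* contents retained - safe to hand the bo back out */
}
#endif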
482
 
3255 Serge 483
static void gem_close(int fd, uint32_t handle)
484
{
485
	struct drm_gem_close close;
486
 
487
	VG_CLEAR(close);
488
	close.handle = handle;
3258 Serge 489
	(void)drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
3255 Serge 490
}
491
 
492
constant inline static unsigned long __fls(unsigned long word)
493
{
494
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
495
	asm("bsr %1,%0"
496
	    : "=r" (word)
497
	    : "rm" (word));
498
	return word;
499
#else
500
	unsigned int v = 0;
501
 
502
	while (word >>= 1)
503
		v++;
504
 
505
	return v;
506
#endif
507
}
508
 
509
constant inline static int cache_bucket(int num_pages)
510
{
511
	return __fls(num_pages);
512
}
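/* cache_bucket() is floor(log2(num_pages)), so bucket n collects objects of
 * 2^n .. 2^(n+1)-1 pages.  For illustration:
 */
#if 0
assert(cache_bucket(1) == 0 && cache_bucket(3) == 1);
assert(cache_bucket(4) == 2 && cache_bucket(15) == 3);
#endif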
513
 
514
static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
515
				      int handle, int num_pages)
516
{
517
	assert(num_pages);
518
	memset(bo, 0, sizeof(*bo));
519
 
520
	bo->refcnt = 1;
521
	bo->handle = handle;
522
	bo->target_handle = -1;
523
	num_pages(bo) = num_pages;
524
	bucket(bo) = cache_bucket(num_pages);
525
	bo->reusable = true;
526
	bo->domain = DOMAIN_CPU;
527
	list_init(&bo->request);
528
	list_init(&bo->list);
529
	list_init(&bo->vma);
530
 
531
	return bo;
532
}
533
 
534
static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
535
{
536
	struct kgem_bo *bo;
537
 
538
	if (__kgem_freed_bo) {
539
		bo = __kgem_freed_bo;
540
		__kgem_freed_bo = *(struct kgem_bo **)bo;
541
	} else {
542
		bo = malloc(sizeof(*bo));
543
		if (bo == NULL)
544
			return NULL;
545
	}
546
 
547
	return __kgem_bo_init(bo, handle, num_pages);
548
}
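/* Freed kgem_bo structs are recycled through an intrusive singly-linked
 * free list: the first word of a dead bo doubles as the "next" pointer, so
 * no separate node allocation is needed.  This mirrors what kgem_bo_free()
 * and the pop above do:
 */
#if 0
/* push a dead bo: */
*(struct kgem_bo **)bo = __kgem_freed_bo;
__kgem_freed_bo = bo;
/* pop for reuse:  */
bo = __kgem_freed_bo;
__kgem_freed_bo = *(struct kgem_bo **)bo;
#endif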
549
 
3256 Serge 550
static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
551
{
552
	struct kgem_request *rq;
553
 
554
	rq = __kgem_freed_request;
555
	if (rq) {
556
		__kgem_freed_request = *(struct kgem_request **)rq;
557
	} else {
558
		rq = malloc(sizeof(*rq));
559
		if (rq == NULL)
560
			rq = &kgem->static_request;
561
	}
562
 
563
	list_init(&rq->buffers);
564
	rq->bo = NULL;
565
	rq->ring = 0;
566
 
567
	return rq;
568
}
569
 
570
static void __kgem_request_free(struct kgem_request *rq)
571
{
572
	_list_del(&rq->list);
573
	*(struct kgem_request **)rq = __kgem_freed_request;
574
	__kgem_freed_request = rq;
575
}
576
 
577
static struct list *inactive(struct kgem *kgem, int num_pages)
578
{
579
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
580
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
581
	return &kgem->inactive[cache_bucket(num_pages)];
582
}
583
 
584
static struct list *active(struct kgem *kgem, int num_pages, int tiling)
585
{
586
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
587
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
588
	return &kgem->active[cache_bucket(num_pages)][tiling];
589
}
590
 
591
static size_t
592
agp_aperture_size(struct pci_device *dev, unsigned gen)
593
{
594
	/* XXX assume that only future chipsets are unknown and follow
595
	 * the post gen2 PCI layout.
596
	 */
597
//	return dev->regions[gen < 030 ? 0 : 2].size;
598
 
599
    return 0;
600
}
601
 
602
static size_t
603
total_ram_size(void)
604
{
605
    uint32_t  data[9];
606
    size_t    size = 0;
607
 
608
    asm volatile("int $0x40"
609
        : "=a" (size)
610
        : "a" (18),"b"(20), "c" (data)
611
        : "memory");
612
 
613
    return size != -1 ? size : 0;
614
}
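/* total_ram_size() queries the KolibriOS kernel (int 0x40, function 18,
 * subfunction 20) for memory statistics; the value is treated as a byte
 * count by kgem_init(), and a failure (-1) is mapped to 0 so the caller
 * can fall back to the aperture size.
 */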
615
 
3254 Serge 616
static int gem_param(struct kgem *kgem, int name)
617
{
618
    drm_i915_getparam_t gp;
619
    int v = -1; /* No param uses the sign bit, reserve it for errors */
620
 
621
    VG_CLEAR(gp);
622
    gp.param = name;
623
    gp.value = &v;
3258 Serge 624
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp))
3254 Serge 625
        return -1;
626
 
627
    VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
628
    return v;
629
}
630
 
3255 Serge 631
static bool test_has_execbuffer2(struct kgem *kgem)
632
{
633
	return 1;
634
}
635
 
3254 Serge 636
static bool test_has_no_reloc(struct kgem *kgem)
637
{
638
	if (DBG_NO_FAST_RELOC)
639
		return false;
640
 
641
	return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
642
}
643
 
644
static bool test_has_handle_lut(struct kgem *kgem)
645
{
646
	if (DBG_NO_HANDLE_LUT)
647
		return false;
648
 
649
	return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
650
}
651
 
652
static bool test_has_semaphores_enabled(struct kgem *kgem)
653
{
654
	FILE *file;
655
	bool detected = false;
656
	int ret;
657
 
658
	if (DBG_NO_SEMAPHORES)
659
		return false;
660
 
661
	ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
662
	if (ret != -1)
663
		return ret > 0;
664
 
665
	return detected;
666
}
667
 
3255 Serge 668
static bool __kgem_throttle(struct kgem *kgem)
669
{
3263 Serge 670
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
3255 Serge 671
		return false;
3254 Serge 672
 
3263 Serge 673
	return errno == EIO;
3255 Serge 674
}
675
 
676
static bool is_hw_supported(struct kgem *kgem,
677
			    struct pci_device *dev)
678
{
679
	if (DBG_NO_HW)
680
		return false;
681
 
682
	if (!test_has_execbuffer2(kgem))
683
		return false;
684
 
685
	if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
686
		return kgem->has_blt;
687
 
688
	/* Although pre-855gm the GMCH is fubar, it works mostly. So
689
	 * let the user decide through "NoAccel" whether or not to risk
690
	 * hw acceleration.
691
	 */
692
 
693
	if (kgem->gen == 060 && dev->revision < 8) {
694
		/* pre-production SNB with dysfunctional BLT */
695
		return false;
696
	}
697
 
698
	if (kgem->gen >= 060) /* Only if the kernel supports the BLT ring */
699
		return kgem->has_blt;
700
 
701
	return true;
702
}
703
 
3254 Serge 704
static bool test_has_relaxed_fencing(struct kgem *kgem)
705
{
706
	if (kgem->gen < 040) {
707
		if (DBG_NO_RELAXED_FENCING)
708
			return false;
709
 
710
		return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0;
711
	} else
712
		return true;
713
}
714
 
715
static bool test_has_llc(struct kgem *kgem)
716
{
717
	int has_llc = -1;
718
 
719
	if (DBG_NO_LLC)
720
		return false;
721
 
722
#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
723
	has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
724
#endif
725
	if (has_llc == -1) {
726
		DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
727
		has_llc = kgem->gen >= 060;
728
	}
729
 
730
	return has_llc;
731
}
732
 
733
static bool test_has_cacheing(struct kgem *kgem)
734
{
735
	uint32_t handle;
3256 Serge 736
	bool ret;
3254 Serge 737
 
738
	if (DBG_NO_CACHE_LEVEL)
739
		return false;
740
 
741
	/* Incoherent blt and sampler hangs the GPU */
742
	if (kgem->gen == 040)
743
		return false;
744
 
3256 Serge 745
	handle = gem_create(kgem->fd, 1);
746
	if (handle == 0)
747
		return false;
3254 Serge 748
 
3256 Serge 749
	ret = gem_set_cacheing(kgem->fd, handle, UNCACHED);
750
	gem_close(kgem->fd, handle);
3254 Serge 751
	return ret;
752
}
753
 
754
static bool test_has_userptr(struct kgem *kgem)
755
{
756
#if defined(USE_USERPTR)
757
	uint32_t handle;
758
	void *ptr;
759
 
760
	if (DBG_NO_USERPTR)
761
		return false;
762
 
763
	/* Incoherent blt and sampler hangs the GPU */
764
	if (kgem->gen == 040)
765
		return false;
766
 
767
	ptr = malloc(PAGE_SIZE);
768
	handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
769
	gem_close(kgem->fd, handle);
770
	free(ptr);
771
 
772
	return handle != 0;
773
#else
774
	return false;
775
#endif
776
}
777
 
778
static bool test_has_secure_batches(struct kgem *kgem)
779
{
780
	if (DBG_NO_SECURE_BATCHES)
781
		return false;
782
 
783
	return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
784
}
785
 
786
static bool test_has_pinned_batches(struct kgem *kgem)
787
{
788
	if (DBG_NO_PINNED_BATCHES)
789
		return false;
790
 
791
	return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
792
}
793
 
794
 
3255 Serge 795
static bool kgem_init_pinned_batches(struct kgem *kgem)
796
{
797
	int count[2] = { 4, 2 };
798
	int size[2] = { 1, 4 };
799
	int n, i;
800
 
801
	if (kgem->wedged)
802
		return true;
803
 
804
	for (n = 0; n < ARRAY_SIZE(count); n++) {
805
		for (i = 0; i < count[n]; i++) {
806
			struct drm_i915_gem_pin pin;
807
			struct kgem_bo *bo;
808
 
809
			VG_CLEAR(pin);
810
 
811
			pin.handle = gem_create(kgem->fd, size[n]);
812
			if (pin.handle == 0)
813
				goto err;
814
 
815
			DBG(("%s: new handle=%d, num_pages=%d\n",
816
			     __FUNCTION__, pin.handle, size[n]));
817
 
818
			bo = __kgem_bo_alloc(pin.handle, size[n]);
819
			if (bo == NULL) {
820
				gem_close(kgem->fd, pin.handle);
821
				goto err;
822
			}
823
 
824
			pin.alignment = 0;
3258 Serge 825
			if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
3255 Serge 826
				gem_close(kgem->fd, pin.handle);
827
				goto err;
828
			}
829
			bo->presumed_offset = pin.offset;
830
			debug_alloc__bo(kgem, bo);
831
			list_add(&bo->list, &kgem->pinned_batches[n]);
832
		}
833
	}
834
 
835
	return true;
836
 
837
err:
838
	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
839
		while (!list_is_empty(&kgem->pinned_batches[n])) {
840
			kgem_bo_destroy(kgem,
841
					list_first_entry(&kgem->pinned_batches[n],
842
							 struct kgem_bo, list));
843
		}
844
	}
845
 
846
	/* For simplicity populate the lists with a single unpinned bo */
847
	for (n = 0; n < ARRAY_SIZE(count); n++) {
848
		struct kgem_bo *bo;
849
		uint32_t handle;
850
 
851
		handle = gem_create(kgem->fd, size[n]);
852
		if (handle == 0)
853
			break;
854
 
855
		bo = __kgem_bo_alloc(handle, size[n]);
856
		if (bo == NULL) {
857
			gem_close(kgem->fd, handle);
858
			break;
859
		}
860
 
861
		debug_alloc__bo(kgem, bo);
862
		list_add(&bo->list, &kgem->pinned_batches[n]);
863
	}
864
	return false;
865
}
866
 
3254 Serge 867
void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
868
{
869
    struct drm_i915_gem_get_aperture aperture;
870
    size_t totalram;
871
    unsigned half_gpu_max;
872
    unsigned int i, j;
873
 
874
    DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));
875
 
876
    memset(kgem, 0, sizeof(*kgem));
877
 
878
    kgem->fd = fd;
879
    kgem->gen = gen;
880
 
881
    list_init(&kgem->requests[0]);
882
    list_init(&kgem->requests[1]);
883
    list_init(&kgem->batch_buffers);
884
    list_init(&kgem->active_buffers);
885
    list_init(&kgem->flushing);
886
    list_init(&kgem->large);
887
    list_init(&kgem->large_inactive);
888
    list_init(&kgem->snoop);
889
    list_init(&kgem->scanout);
890
    for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++)
891
        list_init(&kgem->pinned_batches[i]);
892
    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
893
        list_init(&kgem->inactive[i]);
894
    for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
895
        for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++)
896
            list_init(&kgem->active[i][j]);
897
    }
898
    for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) {
899
        for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
900
            list_init(&kgem->vma[i].inactive[j]);
901
    }
902
    kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
903
    kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;
904
 
905
    kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
906
    DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
907
         kgem->has_blt));
908
 
909
    kgem->has_relaxed_delta =
910
        gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
911
    DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
912
         kgem->has_relaxed_delta));
913
 
914
    kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
915
    DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
916
         kgem->has_relaxed_fencing));
917
 
918
    kgem->has_llc = test_has_llc(kgem);
919
    DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
920
         kgem->has_llc));
921
 
922
    kgem->has_cacheing = test_has_cacheing(kgem);
923
    DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
924
         kgem->has_cacheing));
925
 
926
    kgem->has_userptr = test_has_userptr(kgem);
927
    DBG(("%s: has userptr? %d\n", __FUNCTION__,
928
         kgem->has_userptr));
929
 
930
    kgem->has_no_reloc = test_has_no_reloc(kgem);
931
    DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
932
         kgem->has_no_reloc));
933
 
934
    kgem->has_handle_lut = test_has_handle_lut(kgem);
935
    DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
936
         kgem->has_handle_lut));
937
 
938
    kgem->has_semaphores = false;
939
    if (kgem->has_blt && test_has_semaphores_enabled(kgem))
940
        kgem->has_semaphores = true;
941
    DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
942
         kgem->has_semaphores));
943
 
944
    kgem->can_blt_cpu = gen >= 030;
945
    DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
946
         kgem->can_blt_cpu));
947
 
948
    kgem->has_secure_batches = test_has_secure_batches(kgem);
949
    DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
950
         kgem->has_secure_batches));
951
 
952
    kgem->has_pinned_batches = test_has_pinned_batches(kgem);
953
    DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
954
         kgem->has_pinned_batches));
955
 
956
    if (!is_hw_supported(kgem, dev)) {
3255 Serge 957
        printf("Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
3254 Serge 958
        kgem->wedged = 1;
959
    } else if (__kgem_throttle(kgem)) {
3255 Serge 960
        printf("Detected a hung GPU, disabling acceleration.\n");
3254 Serge 961
        kgem->wedged = 1;
962
    }
963
 
964
    kgem->batch_size = ARRAY_SIZE(kgem->batch);
965
    if (gen == 020 && !kgem->has_pinned_batches)
966
        /* Limited to what we can pin */
967
        kgem->batch_size = 4*1024;
968
    if (gen == 022)
969
        /* 865g cannot handle a batch spanning multiple pages */
970
        kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
971
    if ((gen >> 3) == 7)
972
        kgem->batch_size = 16*1024;
973
    if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
974
        kgem->batch_size = 4*1024;
975
 
976
    if (!kgem_init_pinned_batches(kgem) && gen == 020) {
3255 Serge 977
        printf("Unable to reserve memory for GPU, disabling acceleration.\n");
3254 Serge 978
        kgem->wedged = 1;
979
    }
980
 
981
    DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
982
         kgem->batch_size));
983
 
3291 Serge 984
    kgem->min_alignment = 16;
3254 Serge 985
    if (gen < 040)
986
        kgem->min_alignment = 64;
987
 
988
    kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
989
    DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
990
         kgem->half_cpu_cache_pages));
991
 
992
    kgem->next_request = __kgem_request_alloc(kgem);
993
 
994
    DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
995
         !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
996
         kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));
997
 
998
    VG_CLEAR(aperture);
999
    aperture.aper_size = 0;
3258 Serge 1000
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
3254 Serge 1001
    if (aperture.aper_size == 0)
1002
        aperture.aper_size = 64*1024*1024;
1003
 
1004
    DBG(("%s: aperture size %lld, available now %lld\n",
1005
         __FUNCTION__,
1006
         (long long)aperture.aper_size,
1007
         (long long)aperture.aper_available_size));
1008
 
1009
    kgem->aperture_total = aperture.aper_size;
1010
    kgem->aperture_high = aperture.aper_size * 3/4;
1011
    kgem->aperture_low = aperture.aper_size * 1/3;
1012
    if (gen < 033) {
1013
        /* Severe alignment penalties */
1014
        kgem->aperture_high /= 2;
1015
        kgem->aperture_low /= 2;
1016
    }
1017
    DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
1018
         kgem->aperture_low, kgem->aperture_low / (1024*1024),
1019
         kgem->aperture_high, kgem->aperture_high / (1024*1024)));
1020
 
1021
    kgem->aperture_mappable = agp_aperture_size(dev, gen);
1022
    if (kgem->aperture_mappable == 0 ||
1023
        kgem->aperture_mappable > aperture.aper_size)
1024
        kgem->aperture_mappable = aperture.aper_size;
1025
    DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
1026
         kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));
1027
 
1028
    kgem->buffer_size = 64 * 1024;
1029
    while (kgem->buffer_size < kgem->aperture_mappable >> 10)
1030
        kgem->buffer_size *= 2;
1031
    if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
1032
        kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
1033
    DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
1034
         kgem->buffer_size, kgem->buffer_size / 1024));
1035
 
1036
    kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
1037
    kgem->max_gpu_size = kgem->max_object_size;
1038
    if (!kgem->has_llc)
1039
        kgem->max_gpu_size = MAX_CACHE_SIZE;
1040
 
1041
    totalram = total_ram_size();
1042
    if (totalram == 0) {
1043
        DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
1044
             __FUNCTION__));
1045
        totalram = kgem->aperture_total;
1046
    }
3256 Serge 1047
    DBG(("%s: total ram=%u\n", __FUNCTION__, totalram));
3254 Serge 1048
    if (kgem->max_object_size > totalram / 2)
1049
        kgem->max_object_size = totalram / 2;
1050
    if (kgem->max_gpu_size > totalram / 4)
1051
        kgem->max_gpu_size = totalram / 4;
1052
 
1053
    kgem->max_cpu_size = kgem->max_object_size;
1054
 
1055
    half_gpu_max = kgem->max_gpu_size / 2;
1056
    kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
1057
    if (kgem->max_copy_tile_size > half_gpu_max)
1058
        kgem->max_copy_tile_size = half_gpu_max;
1059
 
1060
    if (kgem->has_llc)
1061
        kgem->max_upload_tile_size = kgem->max_copy_tile_size;
1062
    else
1063
        kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
1064
    if (kgem->max_upload_tile_size > half_gpu_max)
1065
        kgem->max_upload_tile_size = half_gpu_max;
1066
 
1067
    kgem->large_object_size = MAX_CACHE_SIZE;
1068
    if (kgem->large_object_size > kgem->max_gpu_size)
1069
        kgem->large_object_size = kgem->max_gpu_size;
1070
 
1071
    if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
1072
        if (kgem->large_object_size > kgem->max_cpu_size)
1073
            kgem->large_object_size = kgem->max_cpu_size;
1074
    } else
1075
        kgem->max_cpu_size = 0;
1076
    if (DBG_NO_CPU)
1077
        kgem->max_cpu_size = 0;
1078
 
1079
    DBG(("%s: maximum object size=%d\n",
1080
         __FUNCTION__, kgem->max_object_size));
1081
    DBG(("%s: large object thresold=%d\n",
1082
         __FUNCTION__, kgem->large_object_size));
1083
    DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
1084
         __FUNCTION__,
1085
         kgem->max_gpu_size, kgem->max_cpu_size,
1086
         kgem->max_upload_tile_size, kgem->max_copy_tile_size));
1087
 
1088
    /* Convert the aperture thresholds to pages */
1089
    kgem->aperture_low /= PAGE_SIZE;
1090
    kgem->aperture_high /= PAGE_SIZE;
1091
 
1092
    kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
1093
    if ((int)kgem->fence_max < 0)
1094
        kgem->fence_max = 5; /* minimum safe value for all hw */
1095
    DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));
1096
 
1097
    kgem->batch_flags_base = 0;
1098
    if (kgem->has_no_reloc)
1099
        kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
1100
    if (kgem->has_handle_lut)
1101
        kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
1102
    if (kgem->has_pinned_batches)
1103
        kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;
3263 Serge 1104
}
3254 Serge 1105
 
3263 Serge 1106
/* XXX hopefully a good approximation */
1107
static uint32_t kgem_get_unique_id(struct kgem *kgem)
1108
{
1109
	uint32_t id;
1110
	id = ++kgem->unique_id;
1111
	if (id == 0)
1112
		id = ++kgem->unique_id;
1113
	return id;
3256 Serge 1114
}
3254 Serge 1115
 
3263 Serge 1116
inline static uint32_t kgem_pitch_alignment(struct kgem *kgem, unsigned flags)
1117
{
1118
	if (flags & CREATE_PRIME)
1119
		return 256;
1120
	if (flags & CREATE_SCANOUT)
1121
		return 64;
1122
	return kgem->min_alignment;
1123
}
1124
 
1125
static uint32_t kgem_untiled_pitch(struct kgem *kgem,
1126
				   uint32_t width, uint32_t bpp,
1127
				   unsigned flags)
1128
{
1129
	width = ALIGN(width, 2) * bpp >> 3;
1130
	return ALIGN(width, kgem_pitch_alignment(kgem, flags));
1131
}
1132
static uint32_t kgem_surface_size(struct kgem *kgem,
1133
				  bool relaxed_fencing,
1134
				  unsigned flags,
1135
				  uint32_t width,
1136
				  uint32_t height,
1137
				  uint32_t bpp,
1138
				  uint32_t tiling,
1139
				  uint32_t *pitch)
1140
{
1141
	uint32_t tile_width, tile_height;
1142
	uint32_t size;
1143
 
1144
	assert(width <= MAXSHORT);
1145
	assert(height <= MAXSHORT);
1146
 
1147
	if (kgem->gen <= 030) {
1148
		if (tiling) {
1149
			if (kgem->gen < 030) {
1150
				tile_width = 128;
1151
				tile_height = 32;
1152
			} else {
1153
				tile_width = 512;
1154
				tile_height = 16;
1155
			}
1156
		} else {
1157
			tile_width = 2 * bpp >> 3;
1158
			tile_width = ALIGN(tile_width,
1159
					   kgem_pitch_alignment(kgem, flags));
1160
			tile_height = 2;
1161
		}
1162
	} else switch (tiling) {
1163
	default:
1164
	case I915_TILING_NONE:
1165
		tile_width = 2 * bpp >> 3;
1166
		tile_width = ALIGN(tile_width,
1167
				   kgem_pitch_alignment(kgem, flags));
1168
		tile_height = 2;
1169
		break;
1170
 
1171
		/* XXX align to an even tile row */
1172
	case I915_TILING_X:
1173
		tile_width = 512;
1174
		tile_height = 16;
1175
		break;
1176
	case I915_TILING_Y:
1177
		tile_width = 128;
1178
		tile_height = 64;
1179
		break;
1180
	}
1181
 
1182
	*pitch = ALIGN(width * bpp / 8, tile_width);
1183
	height = ALIGN(height, tile_height);
1184
	if (kgem->gen >= 040)
1185
		return PAGE_ALIGN(*pitch * height);
1186
 
1187
	/* If it is too wide for the blitter, don't even bother.  */
1188
	if (tiling != I915_TILING_NONE) {
1189
		if (*pitch > 8192)
1190
			return 0;
1191
 
1192
		for (size = tile_width; size < *pitch; size <<= 1)
1193
			;
1194
		*pitch = size;
1195
	} else {
1196
		if (*pitch >= 32768)
1197
			return 0;
1198
	}
1199
 
1200
	size = *pitch * height;
1201
	if (relaxed_fencing || tiling == I915_TILING_NONE)
1202
		return PAGE_ALIGN(size);
1203
 
1204
	/* We need to allocate a power-of-two fence region for a tiled buffer. */
1205
	if (kgem->gen < 030)
1206
		tile_width = 512 * 1024;
1207
	else
1208
		tile_width = 1024 * 1024;
1209
	while (tile_width < size)
1210
		tile_width *= 2;
1211
	return tile_width;
1212
}
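/* Worked example (illustrative numbers): a 1920x1080, 32bpp, X-tiled
 * surface on gen4+ with the default pitch alignment gives
 *   pitch  = ALIGN(1920 * 4, 512)    = 7680 bytes,
 *   height = ALIGN(1080, 16)         = 1088 rows,
 *   size   = PAGE_ALIGN(7680 * 1088) = 8355840 bytes (2040 pages).
 * On pre-gen4 hardware without relaxed fencing the same surface would be
 * rounded further, up to a power-of-two fence region.
 */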
1213
 
1214
static uint32_t kgem_aligned_height(struct kgem *kgem,
1215
				    uint32_t height, uint32_t tiling)
1216
{
1217
	uint32_t tile_height;
1218
 
1219
	if (kgem->gen <= 030) {
1220
		tile_height = tiling ? kgem->gen < 030 ? 32 : 16 : 1;
1221
	} else switch (tiling) {
1222
		/* XXX align to an even tile row */
1223
	default:
1224
	case I915_TILING_NONE:
1225
		tile_height = 1;
1226
		break;
1227
	case I915_TILING_X:
1228
		tile_height = 16;
1229
		break;
1230
	case I915_TILING_Y:
1231
		tile_height = 64;
1232
		break;
1233
	}
1234
 
1235
	return ALIGN(height, tile_height);
1236
}
1237
 
3258 Serge 1238
static struct drm_i915_gem_exec_object2 *
1239
kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
1240
{
1241
	struct drm_i915_gem_exec_object2 *exec;
3256 Serge 1242
 
3258 Serge 1243
	DBG(("%s: handle=%d, index=%d\n",
1244
	     __FUNCTION__, bo->handle, kgem->nexec));
1245
 
1246
	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
1247
	bo->target_handle = kgem->has_handle_lut ? kgem->nexec : bo->handle;
1248
	exec = memset(&kgem->exec[kgem->nexec++], 0, sizeof(*exec));
1249
	exec->handle = bo->handle;
1250
	exec->offset = bo->presumed_offset;
1251
 
1252
	kgem->aperture += num_pages(bo);
1253
 
1254
	return exec;
1255
}
1256
 
1257
static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
1258
{
1259
	bo->exec = kgem_add_handle(kgem, bo);
1260
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);
1261
 
1262
	list_move_tail(&bo->request, &kgem->next_request->buffers);
1263
 
1264
	/* XXX is it worth working around gcc here? */
1265
	kgem->flush |= bo->flush;
1266
}
1267
 
1268
static uint32_t kgem_end_batch(struct kgem *kgem)
1269
{
1270
	kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
1271
	if (kgem->nbatch & 1)
1272
		kgem->batch[kgem->nbatch++] = MI_NOOP;
1273
 
1274
	return kgem->nbatch;
1275
}
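/* Batches must end on a quadword boundary, so a single MI_NOOP is appended
 * whenever MI_BATCH_BUFFER_END leaves an odd dword count -- e.g. 4 dwords
 * of commands become END at batch[4] plus NOOP at batch[5], a 6-dword
 * (3-qword) batch.
 */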
1276
 
1277
static void kgem_fixup_self_relocs(struct kgem *kgem, struct kgem_bo *bo)
1278
{
1279
	int n;
1280
 
1281
	if (kgem->nreloc__self == 0)
1282
		return;
1283
 
1284
	for (n = 0; n < kgem->nreloc__self; n++) {
1285
		int i = kgem->reloc__self[n];
1286
		assert(kgem->reloc[i].target_handle == ~0U);
1287
		kgem->reloc[i].target_handle = bo->target_handle;
1288
		kgem->reloc[i].presumed_offset = bo->presumed_offset;
1289
		kgem->batch[kgem->reloc[i].offset/sizeof(kgem->batch[0])] =
1290
			kgem->reloc[i].delta + bo->presumed_offset;
1291
	}
1292
 
1293
	if (n == 256) {
1294
		for (n = kgem->reloc__self[255]; n < kgem->nreloc; n++) {
1295
			if (kgem->reloc[n].target_handle == ~0U) {
1296
				kgem->reloc[n].target_handle = bo->target_handle;
1297
				kgem->reloc[n].presumed_offset = bo->presumed_offset;
1298
				kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
1299
					kgem->reloc[n].delta + bo->presumed_offset;
1300
			}
1301
		}
1302
 
1303
	}
1304
 
1305
}
1306
 
1307
static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo)
1308
{
1309
	struct kgem_bo_binding *b;
1310
 
1311
	b = bo->binding.next;
1312
	while (b) {
1313
		struct kgem_bo_binding *next = b->next;
1314
		free (b);
1315
		b = next;
1316
	}
1317
}
1318
 
1319
static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
1320
{
1321
	int type = IS_CPU_MAP(bo->map);
1322
 
1323
	assert(!IS_USER_MAP(bo->map));
1324
 
1325
	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
1326
	     __FUNCTION__, type ? "CPU" : "GTT",
1327
	     bo->handle, kgem->vma[type].count));
1328
 
1329
	VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
3291 Serge 1330
	user_free(MAP(bo->map));
3258 Serge 1331
	bo->map = NULL;
1332
 
1333
	if (!list_is_empty(&bo->vma)) {
1334
		list_del(&bo->vma);
1335
		kgem->vma[type].count--;
1336
	}
1337
}
1338
 
1339
static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
1340
{
1341
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
3291 Serge 1342
	printf("%s: handle=%d\n", __FUNCTION__, bo->handle);
1343
 
3258 Serge 1344
	assert(bo->refcnt == 0);
1345
	assert(bo->exec == NULL);
1346
	assert(!bo->snoop || bo->rq == NULL);
1347
 
1348
#ifdef DEBUG_MEMORY
1349
	kgem->debug_memory.bo_allocs--;
1350
	kgem->debug_memory.bo_bytes -= bytes(bo);
1351
#endif
1352
 
1353
	kgem_bo_binding_free(kgem, bo);
1354
 
1355
	if (IS_USER_MAP(bo->map)) {
1356
		assert(bo->rq == NULL);
1357
		assert(MAP(bo->map) != bo || bo->io);
1358
		if (bo != MAP(bo->map)) {
1359
			DBG(("%s: freeing snooped base\n", __FUNCTION__));
1360
			free(MAP(bo->map));
1361
		}
1362
		bo->map = NULL;
1363
	}
1364
	if (bo->map)
1365
		kgem_bo_release_map(kgem, bo);
1366
	assert(list_is_empty(&bo->vma));
1367
 
1368
	_list_del(&bo->list);
1369
	_list_del(&bo->request);
1370
	gem_close(kgem->fd, bo->handle);
1371
 
1372
	if (!bo->io) {
1373
		*(struct kgem_bo **)bo = __kgem_freed_bo;
1374
		__kgem_freed_bo = bo;
1375
	} else
1376
		free(bo);
1377
}
1378
 
1379
inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
1380
					    struct kgem_bo *bo)
1381
{
1382
	DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle));
1383
 
1384
	assert(bo->refcnt == 0);
1385
	assert(bo->reusable);
1386
	assert(bo->rq == NULL);
1387
	assert(bo->exec == NULL);
1388
	assert(bo->domain != DOMAIN_GPU);
1389
	assert(!bo->proxy);
1390
	assert(!bo->io);
1391
	assert(!bo->scanout);
1392
	assert(!bo->needs_flush);
1393
	assert(list_is_empty(&bo->vma));
1394
	ASSERT_IDLE(kgem, bo->handle);
1395
 
1396
	kgem->need_expire = true;
1397
 
1398
	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
1399
		list_move(&bo->list, &kgem->large_inactive);
1400
		return;
1401
	}
1402
 
1403
	assert(bo->flush == false);
1404
	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
1405
	if (bo->map) {
1406
		int type = IS_CPU_MAP(bo->map);
1407
		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
1408
		    (!type && !__kgem_bo_is_mappable(kgem, bo))) {
1409
//			munmap(MAP(bo->map), bytes(bo));
1410
			bo->map = NULL;
1411
		}
1412
		if (bo->map) {
1413
			list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
1414
			kgem->vma[type].count++;
1415
		}
1416
	}
1417
}
1418
 
1419
static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
1420
{
1421
	struct kgem_bo *base;
1422
 
1423
	if (!bo->io)
1424
		return bo;
1425
 
1426
	assert(!bo->snoop);
1427
	base = malloc(sizeof(*base));
1428
	if (base) {
1429
		DBG(("%s: transferring io handle=%d to bo\n",
1430
		     __FUNCTION__, bo->handle));
1431
		/* transfer the handle to a minimum bo */
1432
		memcpy(base, bo, sizeof(*base));
1433
		base->io = false;
1434
		list_init(&base->list);
1435
		list_replace(&bo->request, &base->request);
1436
		list_replace(&bo->vma, &base->vma);
1437
		free(bo);
1438
		bo = base;
1439
	} else
1440
		bo->reusable = false;
1441
 
1442
	return bo;
1443
}
1444
 
3256 Serge 1445
inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
1446
						struct kgem_bo *bo)
1447
{
1448
	DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));
1449
 
1450
	list_del(&bo->list);
1451
	assert(bo->rq == NULL);
1452
	assert(bo->exec == NULL);
1453
	if (bo->map) {
1454
		assert(!list_is_empty(&bo->vma));
1455
		list_del(&bo->vma);
1456
		kgem->vma[IS_CPU_MAP(bo->map)].count--;
1457
	}
3254 Serge 1458
}
1459
 
3258 Serge 1460
inline static void kgem_bo_remove_from_active(struct kgem *kgem,
1461
					      struct kgem_bo *bo)
1462
{
1463
	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));
3254 Serge 1464
 
3258 Serge 1465
	list_del(&bo->list);
1466
	assert(bo->rq != NULL);
1467
	if (bo->rq == (void *)kgem)
1468
		list_del(&bo->request);
1469
	assert(list_is_empty(&bo->vma));
1470
}
3254 Serge 1471
 
3258 Serge 1472
static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
1473
{
1474
	assert(bo->scanout);
1475
	assert(!bo->refcnt);
1476
	assert(bo->exec == NULL);
1477
	assert(bo->proxy == NULL);
3256 Serge 1478
 
3258 Serge 1479
	DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
1480
	     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
1481
	if (bo->delta) {
1482
		/* XXX will leak if we are not DRM_MASTER. *shrug* */
1483
//		drmModeRmFB(kgem->fd, bo->delta);
1484
		bo->delta = 0;
1485
	}
1486
 
1487
	bo->scanout = false;
1488
	bo->flush = false;
1489
	bo->reusable = true;
1490
 
1491
	if (kgem->has_llc &&
1492
	    !gem_set_cacheing(kgem->fd, bo->handle, SNOOPED))
1493
		bo->reusable = false;
1494
}
1495
 
1496
static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
1497
{
1498
	struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;
1499
 
1500
	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
1501
	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
1502
 
1503
	if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used)
1504
		io->used = bo->delta;
1505
}
1506
 
1507
static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
1508
{
1509
	assert(bo->refcnt == 0);
1510
	assert(bo->scanout);
1511
	assert(bo->delta);
1512
	assert(!bo->snoop);
1513
	assert(!bo->io);
1514
 
1515
	DBG(("%s: moving %d [fb %d] to scanout cache, active? %d\n",
1516
	     __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL));
1517
	if (bo->rq)
1518
		list_move_tail(&bo->list, &kgem->scanout);
1519
	else
1520
		list_move(&bo->list, &kgem->scanout);
1521
}
1522
 
1523
static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
1524
{
1525
	assert(bo->refcnt == 0);
1526
	assert(bo->exec == NULL);
1527
 
1528
	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
1529
		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
1530
		     __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13));
1531
		kgem_bo_free(kgem, bo);
1532
		return;
1533
	}
1534
 
1535
	assert(bo->tiling == I915_TILING_NONE);
1536
	assert(bo->rq == NULL);
1537
 
1538
	DBG(("%s: moving %d to snoop cachee\n", __FUNCTION__, bo->handle));
1539
	list_add(&bo->list, &kgem->snoop);
1540
}
1541
 
3256 Serge 1542
static struct kgem_bo *
3258 Serge 1543
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
1544
{
1545
	struct kgem_bo *bo, *first = NULL;
1546
 
1547
	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));
1548
 
1549
	if ((kgem->has_cacheing | kgem->has_userptr) == 0)
1550
		return NULL;
1551
 
1552
	if (list_is_empty(&kgem->snoop)) {
1553
		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
1554
		if (!__kgem_throttle_retire(kgem, flags)) {
1555
			DBG(("%s: nothing retired\n", __FUNCTION__));
1556
			return NULL;
1557
		}
1558
	}
1559
 
1560
	list_for_each_entry(bo, &kgem->snoop, list) {
1561
		assert(bo->refcnt == 0);
1562
		assert(bo->snoop);
1563
		assert(!bo->scanout);
1564
		assert(bo->proxy == NULL);
1565
		assert(bo->tiling == I915_TILING_NONE);
1566
		assert(bo->rq == NULL);
1567
		assert(bo->exec == NULL);
1568
 
1569
		if (num_pages > num_pages(bo))
1570
			continue;
1571
 
1572
		if (num_pages(bo) > 2*num_pages) {
1573
			if (first == NULL)
1574
				first = bo;
1575
			continue;
1576
		}
1577
 
1578
		list_del(&bo->list);
1579
		bo->pitch = 0;
1580
		bo->delta = 0;
1581
 
1582
		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
1583
		     __FUNCTION__, bo->handle, num_pages(bo)));
1584
		return bo;
1585
	}
1586
 
1587
	if (first) {
1588
		list_del(&first->list);
1589
		first->pitch = 0;
1590
		first->delta = 0;
1591
 
1592
		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
1593
		     __FUNCTION__, first->handle, num_pages(first)));
1594
		return first;
1595
	}
1596
 
1597
	return NULL;
1598
}
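/* The search above prefers a close fit: buffers smaller than the request
 * are skipped, the first candidate no larger than twice the request is
 * taken immediately, and an oversized buffer is only remembered in 'first'
 * as a fallback if nothing tighter turns up.
 */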
1599
 
1600
static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
1601
{
1602
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
1603
 
3291 Serge 1604
	printf("%s: handle=%d\n", __FUNCTION__, bo->handle);
1605
 
3258 Serge 1606
	assert(list_is_empty(&bo->list));
1607
	assert(bo->refcnt == 0);
1608
	assert(!bo->purged);
1609
	assert(bo->proxy == NULL);
1610
 
1611
	bo->binding.offset = 0;
1612
 
1613
	if (DBG_NO_CACHE)
1614
		goto destroy;
1615
 
1616
	if (bo->snoop && !bo->flush) {
1617
		DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
1618
		assert(!bo->flush);
1619
		assert(list_is_empty(&bo->list));
1620
		if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle))
1621
			__kgem_bo_clear_busy(bo);
1622
		if (bo->rq == NULL) {
1623
			assert(!bo->needs_flush);
1624
			kgem_bo_move_to_snoop(kgem, bo);
1625
		}
1626
		return;
1627
	}
1628
 
1629
	if (bo->scanout) {
1630
		kgem_bo_move_to_scanout(kgem, bo);
1631
		return;
1632
	}
1633
 
1634
	if (bo->io)
1635
		bo = kgem_bo_replace_io(bo);
1636
	if (!bo->reusable) {
1637
		DBG(("%s: handle=%d, not reusable\n",
1638
		     __FUNCTION__, bo->handle));
1639
		goto destroy;
1640
	}
1641
 
1642
	if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU)
1643
		kgem_bo_release_map(kgem, bo);
1644
 
1645
	assert(list_is_empty(&bo->vma));
1646
	assert(list_is_empty(&bo->list));
1647
	assert(bo->snoop == false);
1648
	assert(bo->io == false);
1649
	assert(bo->scanout == false);
1650
 
1651
	if (bo->exec && kgem->nexec == 1) {
1652
		DBG(("%s: only handle in batch, discarding last operations\n",
1653
		     __FUNCTION__));
1654
		assert(bo->exec == &kgem->exec[0]);
1655
		assert(kgem->exec[0].handle == bo->handle);
1656
		assert(RQ(bo->rq) == kgem->next_request);
1657
		bo->refcnt = 1;
1658
		kgem_reset(kgem);
1659
		bo->refcnt = 0;
1660
	}
1661
 
1662
	if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
1663
		__kgem_bo_clear_busy(bo);
1664
 
1665
	if (bo->rq) {
1666
		struct list *cache;
1667
 
1668
		DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
1669
		if (bucket(bo) < NUM_CACHE_BUCKETS)
1670
			cache = &kgem->active[bucket(bo)][bo->tiling];
1671
		else
1672
			cache = &kgem->large;
1673
		list_add(&bo->list, cache);
1674
		return;
1675
	}
1676
 
1677
	assert(bo->exec == NULL);
1678
	assert(list_is_empty(&bo->request));
1679
 
1680
	if (!IS_CPU_MAP(bo->map)) {
1681
		if (!kgem_bo_set_purgeable(kgem, bo))
1682
			goto destroy;
1683
 
1684
		if (!kgem->has_llc && bo->domain == DOMAIN_CPU)
1685
			goto destroy;
1686
 
1687
		DBG(("%s: handle=%d, purged\n",
1688
		     __FUNCTION__, bo->handle));
1689
	}
1690
 
1691
	kgem_bo_move_to_inactive(kgem, bo);
1692
	return;
1693
 
1694
destroy:
1695
	if (!bo->exec)
1696
		kgem_bo_free(kgem, bo);
1697
}
1698
 
1699
static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
1700
{
1701
	assert(bo->refcnt);
1702
	if (--bo->refcnt == 0)
1703
		__kgem_bo_destroy(kgem, bo);
1704
}
1705
 
1706
static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
1707
{
1708
	while (!list_is_empty(&bo->base.vma)) {
1709
		struct kgem_bo *cached;
1710
 
1711
		cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
1712
		assert(cached->proxy == &bo->base);
1713
		list_del(&cached->vma);
1714
 
1715
		assert(*(struct kgem_bo **)cached->map == cached);
1716
		*(struct kgem_bo **)cached->map = NULL;
1717
		cached->map = NULL;
1718
 
1719
		kgem_bo_destroy(kgem, cached);
1720
	}
1721
}
1722
 
1723
static bool kgem_retire__buffers(struct kgem *kgem)
1724
{
1725
	bool retired = false;
1726
 
1727
	while (!list_is_empty(&kgem->active_buffers)) {
1728
		struct kgem_buffer *bo =
1729
			list_last_entry(&kgem->active_buffers,
1730
					struct kgem_buffer,
1731
					base.list);
1732
 
1733
		if (bo->base.rq)
1734
			break;
1735
 
1736
		DBG(("%s: releasing upload cache for handle=%d? %d\n",
1737
		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
1738
		list_del(&bo->base.list);
1739
		kgem_buffer_release(kgem, bo);
1740
		kgem_bo_unref(kgem, &bo->base);
1741
		retired = true;
1742
	}
1743
 
1744
	return retired;
1745
}
1746
 
1747
static bool kgem_retire__flushing(struct kgem *kgem)
1748
{
1749
	struct kgem_bo *bo, *next;
1750
	bool retired = false;
1751
 
1752
	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
1753
		assert(bo->rq == (void *)kgem);
1754
		assert(bo->exec == NULL);
1755
 
1756
		if (__kgem_busy(kgem, bo->handle))
1757
			break;
1758
 
1759
		__kgem_bo_clear_busy(bo);
1760
 
1761
		if (bo->refcnt)
1762
			continue;
1763
 
1764
		if (bo->snoop) {
1765
			kgem_bo_move_to_snoop(kgem, bo);
1766
		} else if (bo->scanout) {
1767
			kgem_bo_move_to_scanout(kgem, bo);
1768
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
1769
			   kgem_bo_set_purgeable(kgem, bo)) {
1770
			kgem_bo_move_to_inactive(kgem, bo);
1771
			retired = true;
1772
		} else
1773
			kgem_bo_free(kgem, bo);
1774
	}
1775
#if HAS_DEBUG_FULL
1776
	{
1777
		int count = 0;
1778
		list_for_each_entry(bo, &kgem->flushing, request)
1779
			count++;
1780
		printf("%s: %d bo on flushing list\n", __FUNCTION__, count);
1781
	}
1782
#endif
1783
 
1784
	kgem->need_retire |= !list_is_empty(&kgem->flushing);
1785
 
1786
	return retired;
1787
}
1788
 
1789
 
1790
static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
1791
{
1792
	bool retired = false;
1793
 
1794
	DBG(("%s: request %d complete\n",
1795
	     __FUNCTION__, rq->bo->handle));
1796
 
1797
	while (!list_is_empty(&rq->buffers)) {
1798
		struct kgem_bo *bo;
1799
 
1800
		bo = list_first_entry(&rq->buffers,
1801
				      struct kgem_bo,
1802
				      request);
1803
 
1804
		assert(RQ(bo->rq) == rq);
1805
		assert(bo->exec == NULL);
1806
		assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);
1807
 
1808
		list_del(&bo->request);
1809
 
1810
		if (bo->needs_flush)
1811
			bo->needs_flush = __kgem_busy(kgem, bo->handle);
1812
		if (bo->needs_flush) {
1813
			DBG(("%s: moving %d to flushing\n",
1814
			     __FUNCTION__, bo->handle));
1815
			list_add(&bo->request, &kgem->flushing);
1816
			bo->rq = (void *)kgem;
1817
			continue;
1818
		}
1819
 
1820
		bo->domain = DOMAIN_NONE;
1821
		bo->rq = NULL;
1822
		if (bo->refcnt)
1823
			continue;
1824
 
1825
		if (bo->snoop) {
1826
			kgem_bo_move_to_snoop(kgem, bo);
1827
		} else if (bo->scanout) {
1828
			kgem_bo_move_to_scanout(kgem, bo);
1829
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
1830
			   kgem_bo_set_purgeable(kgem, bo)) {
1831
			kgem_bo_move_to_inactive(kgem, bo);
1832
			retired = true;
1833
		} else {
1834
			DBG(("%s: closing %d\n",
1835
			     __FUNCTION__, bo->handle));
1836
			kgem_bo_free(kgem, bo);
1837
		}
1838
	}
1839
 
1840
	assert(rq->bo->rq == NULL);
1841
	assert(list_is_empty(&rq->bo->request));
1842
 
1843
	if (--rq->bo->refcnt == 0) {
1844
		if (kgem_bo_set_purgeable(kgem, rq->bo)) {
1845
			kgem_bo_move_to_inactive(kgem, rq->bo);
1846
			retired = true;
1847
		} else {
1848
			DBG(("%s: closing %d\n",
1849
			     __FUNCTION__, rq->bo->handle));
1850
			kgem_bo_free(kgem, rq->bo);
1851
		}
1852
	}
1853
 
1854
	__kgem_request_free(rq);
1855
	return retired;
1856
}
1857
 
1858
static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
1859
{
1860
	bool retired = false;
1861
 
1862
	while (!list_is_empty(&kgem->requests[ring])) {
1863
		struct kgem_request *rq;
1864
 
1865
		rq = list_first_entry(&kgem->requests[ring],
1866
				      struct kgem_request,
1867
				      list);
1868
		if (__kgem_busy(kgem, rq->bo->handle))
1869
			break;
1870
 
1871
		retired |= __kgem_retire_rq(kgem, rq);
1872
	}
1873
 
1874
#if HAS_DEBUG_FULL
1875
	{
1876
		struct kgem_bo *bo;
1877
		int count = 0;
1878
 
1879
		list_for_each_entry(bo, &kgem->requests[ring], request)
1880
			count++;
1881
 
1882
		bo = NULL;
1883
		if (!list_is_empty(&kgem->requests[ring]))
1884
			bo = list_first_entry(&kgem->requests[ring],
1885
					      struct kgem_request,
1886
					      list)->bo;
1887
 
1888
		printf("%s: ring=%d, %d outstanding requests, oldest=%d\n",
1889
		       __FUNCTION__, ring, count, bo ? bo->handle : 0);
1890
	}
1891
#endif
1892
 
1893
	return retired;
1894
}
1895
 
1896
static bool kgem_retire__requests(struct kgem *kgem)
1897
{
1898
	bool retired = false;
1899
	int n;
1900
 
1901
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
1902
		retired |= kgem_retire__requests_ring(kgem, n);
1903
		kgem->need_retire |= !list_is_empty(&kgem->requests[n]);
1904
	}
1905
 
1906
	return retired;
1907
}
1908
 
1909
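/* Top-level retire: scan the flushing list, every ring's request list
 * and the active upload buffers, releasing whatever the GPU has
 * finished with.  Returns true if any buffer was retired into a cache.
 */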
bool kgem_retire(struct kgem *kgem)
1910
{
1911
	bool retired = false;
1912
 
1913
	DBG(("%s\n", __FUNCTION__));
1914
 
1915
	kgem->need_retire = false;
1916
 
1917
	retired |= kgem_retire__flushing(kgem);
1918
	retired |= kgem_retire__requests(kgem);
1919
	retired |= kgem_retire__buffers(kgem);
1920
 
1921
	DBG(("%s -- retired=%d, need_retire=%d\n",
1922
	     __FUNCTION__, retired, kgem->need_retire));
1923
 
1924
	kgem->retire(kgem);
1925
 
1926
	return retired;
1927
}
1928
 
3263 Serge 1929
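/* A ring is idle once its most recent request is no longer busy; in
 * that case retire the whole ring before reporting it idle.
 */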
bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
1930
{
1931
	struct kgem_request *rq;
3258 Serge 1932
 
3263 Serge 1933
	assert(!list_is_empty(&kgem->requests[ring]));
3258 Serge 1934
 
3263 Serge 1935
	rq = list_last_entry(&kgem->requests[ring],
1936
			     struct kgem_request, list);
1937
	if (__kgem_busy(kgem, rq->bo->handle)) {
1938
		DBG(("%s: last requests handle=%d still busy\n",
1939
		     __FUNCTION__, rq->bo->handle));
1940
		return false;
1941
	}
3258 Serge 1942
 
3263 Serge 1943
	DBG(("%s: ring=%d idle (handle=%d)\n",
1944
	     __FUNCTION__, ring, rq->bo->handle));
3258 Serge 1945
 
3263 Serge 1946
	kgem_retire__requests_ring(kgem, ring);
1947
	assert(list_is_empty(&kgem->requests[ring]));
1948
	return true;
1949
}
3258 Serge 1950
 
1951
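/* Called after the batch has been submitted: record each buffer's new
 * presumed offset, clear its exec state and hand the request over to
 * its ring.  The static_request path (used when request allocation
 * failed) instead waits for the batch with SET_DOMAIN and cleans up
 * immediately.
 */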
static void kgem_commit(struct kgem *kgem)
1952
{
1953
	struct kgem_request *rq = kgem->next_request;
1954
	struct kgem_bo *bo, *next;
1955
 
1956
	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
1957
		assert(next->request.prev == &bo->request);
1958
 
1959
		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n",
1960
		     __FUNCTION__, bo->handle, bo->proxy != NULL,
1961
		     bo->dirty, bo->needs_flush, bo->snoop,
1962
		     (unsigned)bo->exec->offset));
1963
 
1964
		assert(!bo->purged);
1965
		assert(bo->exec);
1966
		assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec);
1967
		assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));
1968
 
1969
		bo->presumed_offset = bo->exec->offset;
1970
		bo->exec = NULL;
1971
		bo->target_handle = -1;
1972
 
1973
		if (!bo->refcnt && !bo->reusable) {
1974
			assert(!bo->snoop);
1975
			kgem_bo_free(kgem, bo);
1976
			continue;
1977
		}
1978
 
1979
		bo->binding.offset = 0;
1980
		bo->domain = DOMAIN_GPU;
1981
		bo->dirty = false;
1982
 
1983
		if (bo->proxy) {
1984
			/* proxies are not used for domain tracking */
1985
			bo->exec = NULL;
1986
			__kgem_bo_clear_busy(bo);
1987
		}
1988
 
1989
		kgem->scanout_busy |= bo->scanout;
1990
	}
1991
 
1992
	if (rq == &kgem->static_request) {
1993
		struct drm_i915_gem_set_domain set_domain;
1994
 
1995
		DBG(("%s: syncing due to allocation failure\n", __FUNCTION__));
1996
 
1997
		VG_CLEAR(set_domain);
1998
		set_domain.handle = rq->bo->handle;
1999
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
2000
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
2001
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
2002
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
2003
			kgem_throttle(kgem);
2004
		}
2005
 
2006
		kgem_retire(kgem);
2007
		assert(list_is_empty(&rq->buffers));
2008
 
2009
		gem_close(kgem->fd, rq->bo->handle);
2010
		kgem_cleanup_cache(kgem);
2011
	} else {
2012
		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
2013
		kgem->need_throttle = kgem->need_retire = 1;
2014
	}
2015
 
2016
	kgem->next_request = NULL;
2017
}
2018
 
2019
static void kgem_close_list(struct kgem *kgem, struct list *head)
2020
{
2021
	while (!list_is_empty(head))
2022
		kgem_bo_free(kgem, list_first_entry(head, struct kgem_bo, list));
2023
}
2024
 
2025
static void kgem_close_inactive(struct kgem *kgem)
2026
{
2027
	unsigned int i;
2028
 
2029
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
2030
		kgem_close_list(kgem, &kgem->inactive[i]);
2031
}
2032
 
2033
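/* Finalise the upload buffers attached to this batch: mmapped buffers
 * with room to spare are kept on active_buffers for reuse, partially
 * used write buffers may be shrunk into a smaller bo found in the
 * snoop or linear caches (with the relocations patched to point at
 * it), and anything else is uploaded with gem_write before being
 * decoupled from the batch.
 */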
static void kgem_finish_buffers(struct kgem *kgem)
2034
{
2035
	struct kgem_buffer *bo, *next;
2036
 
2037
	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
2038
		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
2039
		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
2040
		     bo->write, bo->mmapped));
2041
 
2042
		assert(next->base.list.prev == &bo->base.list);
2043
		assert(bo->base.io);
2044
		assert(bo->base.refcnt >= 1);
2045
 
2046
		if (!bo->base.exec) {
2047
			DBG(("%s: skipping unattached handle=%d, used=%d\n",
2048
			     __FUNCTION__, bo->base.handle, bo->used));
2049
			continue;
2050
		}
2051
 
2052
		if (!bo->write) {
2053
			assert(bo->base.exec || bo->base.refcnt > 1);
2054
			goto decouple;
2055
		}
2056
 
2057
		if (bo->mmapped) {
2058
			int used;
2059
 
2060
			assert(!bo->need_io);
2061
 
2062
			used = ALIGN(bo->used, PAGE_SIZE);
2063
			if (!DBG_NO_UPLOAD_ACTIVE &&
2064
			    used + PAGE_SIZE <= bytes(&bo->base) &&
2065
			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) {
2066
				DBG(("%s: retaining upload buffer (%d/%d)\n",
2067
				     __FUNCTION__, bo->used, bytes(&bo->base)));
2068
				bo->used = used;
2069
				list_move(&bo->base.list,
2070
					  &kgem->active_buffers);
2071
				continue;
2072
			}
2073
			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
2074
			     __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map)));
2075
			goto decouple;
2076
		}
2077
 
2078
		if (!bo->used) {
2079
			/* Unless we replace the handle in the execbuffer,
2080
			 * this bo will become active. So decouple it
2081
			 * from the buffer list and track it in the normal
2082
			 * manner.
2083
			 */
2084
			goto decouple;
2085
		}
2086
 
2087
		assert(bo->need_io);
2088
		assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
2089
		assert(bo->base.domain != DOMAIN_GPU);
2090
 
2091
		if (bo->base.refcnt == 1 &&
2092
		    bo->base.size.pages.count > 1 &&
2093
		    bo->used < bytes(&bo->base) / 2) {
2094
			struct kgem_bo *shrink;
2095
			unsigned alloc = NUM_PAGES(bo->used);
2096
 
2097
			shrink = search_snoop_cache(kgem, alloc,
2098
						    CREATE_INACTIVE | CREATE_NO_RETIRE);
2099
			if (shrink) {
2100
				void *map;
2101
				int n;
2102
 
2103
				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
2104
				     __FUNCTION__,
2105
				     bo->used, bytes(&bo->base), bytes(shrink),
2106
				     bo->base.handle, shrink->handle));
2107
 
2108
				assert(bo->used <= bytes(shrink));
2109
				map = kgem_bo_map__cpu(kgem, shrink);
2110
				if (map) {
2111
					kgem_bo_sync__cpu(kgem, shrink);
2112
					memcpy(map, bo->mem, bo->used);
2113
 
2114
					shrink->target_handle =
2115
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
2116
					for (n = 0; n < kgem->nreloc; n++) {
2117
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
2118
							kgem->reloc[n].target_handle = shrink->target_handle;
2119
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
2120
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
2121
								kgem->reloc[n].delta + shrink->presumed_offset;
2122
						}
2123
					}
2124
 
2125
					bo->base.exec->handle = shrink->handle;
2126
					bo->base.exec->offset = shrink->presumed_offset;
2127
					shrink->exec = bo->base.exec;
2128
					shrink->rq = bo->base.rq;
2129
					list_replace(&bo->base.request,
2130
						     &shrink->request);
2131
					list_init(&bo->base.request);
2132
					shrink->needs_flush = bo->base.dirty;
2133
 
2134
					bo->base.exec = NULL;
2135
					bo->base.rq = NULL;
2136
					bo->base.dirty = false;
2137
					bo->base.needs_flush = false;
2138
					bo->used = 0;
2139
 
2140
					goto decouple;
2141
				}
2142
 
2143
				__kgem_bo_destroy(kgem, shrink);
2144
			}
2145
 
2146
			shrink = search_linear_cache(kgem, alloc,
2147
						     CREATE_INACTIVE | CREATE_NO_RETIRE);
2148
			if (shrink) {
2149
				int n;
2150
 
2151
				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
2152
				     __FUNCTION__,
2153
				     bo->used, bytes(&bo->base), bytes(shrink),
2154
				     bo->base.handle, shrink->handle));
2155
 
2156
				assert(bo->used <= bytes(shrink));
2157
				if (gem_write(kgem->fd, shrink->handle,
2158
					      0, bo->used, bo->mem) == 0) {
2159
					shrink->target_handle =
2160
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
2161
					for (n = 0; n < kgem->nreloc; n++) {
2162
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
2163
							kgem->reloc[n].target_handle = shrink->target_handle;
2164
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
2165
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
2166
								kgem->reloc[n].delta + shrink->presumed_offset;
2167
						}
2168
					}
2169
 
2170
					bo->base.exec->handle = shrink->handle;
2171
					bo->base.exec->offset = shrink->presumed_offset;
2172
					shrink->exec = bo->base.exec;
2173
					shrink->rq = bo->base.rq;
2174
					list_replace(&bo->base.request,
2175
						     &shrink->request);
2176
					list_init(&bo->base.request);
2177
					shrink->needs_flush = bo->base.dirty;
2178
 
2179
					bo->base.exec = NULL;
2180
					bo->base.rq = NULL;
2181
					bo->base.dirty = false;
2182
					bo->base.needs_flush = false;
2183
					bo->used = 0;
2184
 
2185
					goto decouple;
2186
				}
2187
 
2188
				__kgem_bo_destroy(kgem, shrink);
2189
			}
2190
		}
2191
 
2192
		DBG(("%s: handle=%d, uploading %d/%d\n",
2193
		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
2194
		ASSERT_IDLE(kgem, bo->base.handle);
2195
		assert(bo->used <= bytes(&bo->base));
2196
		gem_write(kgem->fd, bo->base.handle,
2197
			  0, bo->used, bo->mem);
2198
		bo->need_io = 0;
2199
 
2200
decouple:
2201
		DBG(("%s: releasing handle=%d\n",
2202
		     __FUNCTION__, bo->base.handle));
2203
		list_del(&bo->base.list);
2204
		kgem_bo_unref(kgem, &bo->base);
2205
	}
2206
}
2207
 
2208
static void kgem_cleanup(struct kgem *kgem)
2209
{
2210
	int n;
2211
 
2212
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
2213
		while (!list_is_empty(&kgem->requests[n])) {
2214
			struct kgem_request *rq;
2215
 
2216
			rq = list_first_entry(&kgem->requests[n],
2217
					      struct kgem_request,
2218
					      list);
2219
			while (!list_is_empty(&rq->buffers)) {
2220
				struct kgem_bo *bo;
2221
 
2222
				bo = list_first_entry(&rq->buffers,
2223
						      struct kgem_bo,
2224
						      request);
2225
 
2226
				bo->exec = NULL;
2227
				bo->dirty = false;
2228
				__kgem_bo_clear_busy(bo);
2229
				if (bo->refcnt == 0)
2230
					kgem_bo_free(kgem, bo);
2231
			}
2232
 
2233
			__kgem_request_free(rq);
2234
		}
2235
	}
2236
 
2237
	kgem_close_inactive(kgem);
2238
}
2239
 
2240
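/* Upload the batch commands (and any surface state written down from
 * the top of the buffer) into the batch bo, either as one contiguous
 * write or as two separate writes when the two regions are disjoint.
 */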
static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
2241
{
2242
	int ret;
2243
 
2244
	ASSERT_IDLE(kgem, handle);
2245
 
2246
	/* If there is no surface data, just upload the batch */
2247
	if (kgem->surface == kgem->batch_size)
2248
		return gem_write(kgem->fd, handle,
2249
				 0, sizeof(uint32_t)*kgem->nbatch,
2250
				 kgem->batch);
2251
 
2252
	/* Are the batch pages conjoint with the surface pages? */
2253
	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
2254
		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
2255
		return gem_write(kgem->fd, handle,
2256
				 0, kgem->batch_size*sizeof(uint32_t),
2257
				 kgem->batch);
2258
	}
2259
 
2260
	/* Disjoint surface/batch, upload separately */
2261
	ret = gem_write(kgem->fd, handle,
2262
			0, sizeof(uint32_t)*kgem->nbatch,
2263
			kgem->batch);
2264
	if (ret)
2265
		return ret;
2266
 
2267
	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
2268
	ret -= sizeof(uint32_t) * kgem->surface;
2269
	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
2270
	return __gem_write(kgem->fd, handle,
2271
			size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
2272
			kgem->batch + kgem->surface);
2273
}
2274
 
2275
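/* Throw away any partially-built request: buffers that still need a
 * flush are moved to kgem->flushing, unreferenced non-reusable ones
 * are freed, then all batch bookkeeping is cleared and a fresh
 * request is allocated.
 */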
void kgem_reset(struct kgem *kgem)
2276
{
2277
	if (kgem->next_request) {
2278
		struct kgem_request *rq = kgem->next_request;
2279
 
2280
		while (!list_is_empty(&rq->buffers)) {
2281
			struct kgem_bo *bo =
2282
				list_first_entry(&rq->buffers,
2283
						 struct kgem_bo,
2284
						 request);
2285
			list_del(&bo->request);
2286
 
2287
			assert(RQ(bo->rq) == rq);
2288
 
2289
			bo->binding.offset = 0;
2290
			bo->exec = NULL;
2291
			bo->target_handle = -1;
2292
			bo->dirty = false;
2293
 
2294
			if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
2295
				list_add(&bo->request, &kgem->flushing);
2296
				bo->rq = (void *)kgem;
2297
			} else
2298
				__kgem_bo_clear_busy(bo);
2299
 
2300
			if (!bo->refcnt && !bo->reusable) {
2301
				assert(!bo->snoop);
2302
				DBG(("%s: discarding handle=%d\n",
2303
				     __FUNCTION__, bo->handle));
2304
				kgem_bo_free(kgem, bo);
2305
			}
2306
		}
2307
 
2308
		if (rq != &kgem->static_request) {
2309
			list_init(&rq->list);
2310
			__kgem_request_free(rq);
2311
		}
2312
	}
2313
 
2314
	kgem->nfence = 0;
2315
	kgem->nexec = 0;
2316
	kgem->nreloc = 0;
2317
	kgem->nreloc__self = 0;
2318
	kgem->aperture = 0;
2319
	kgem->aperture_fenced = 0;
2320
	kgem->nbatch = 0;
2321
	kgem->surface = kgem->batch_size;
2322
	kgem->mode = KGEM_NONE;
2323
	kgem->flush = 0;
2324
	kgem->batch_flags = kgem->batch_flags_base;
2325
 
2326
	kgem->next_request = __kgem_request_alloc(kgem);
2327
 
2328
	kgem_sna_reset(kgem);
2329
}
2330
 
2331
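/* With relaxed-delta relocations the gap between the batch commands
 * and the surface state (written from the top of the buffer) can be
 * squeezed out; adjust the affected relocations and return the number
 * of bytes actually needed for the batch bo.
 */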
static int compact_batch_surface(struct kgem *kgem)
2332
{
2333
	int size, shrink, n;
2334
 
2335
	if (!kgem->has_relaxed_delta)
2336
		return kgem->batch_size;
2337
 
2338
	/* See if we can pack the contents into one or two pages */
2339
	n = ALIGN(kgem->batch_size, 1024);
2340
	size = n - kgem->surface + kgem->nbatch;
2341
	size = ALIGN(size, 1024);
2342
 
2343
	shrink = n - size;
2344
	if (shrink) {
2345
		DBG(("shrinking from %d to %d\n", kgem->batch_size, size));
2346
 
2347
		shrink *= sizeof(uint32_t);
2348
		for (n = 0; n < kgem->nreloc; n++) {
2349
			if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
2350
			    kgem->reloc[n].target_handle == ~0U)
2351
				kgem->reloc[n].delta -= shrink;
2352
 
2353
			if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
2354
				kgem->reloc[n].offset -= shrink;
2355
		}
2356
	}
2357
 
2358
	return size * sizeof(uint32_t);
2359
}
2360
 
2361
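/* Pick a bo to hold the batch: prefer one of the pinned 4 KiB/16 KiB
 * batch buffers if it is (or can be made) idle, stall for one on gen2
 * without pinned-batch support, and otherwise fall back to a plain
 * linear allocation.
 */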
static struct kgem_bo *
2362
kgem_create_batch(struct kgem *kgem, int size)
2363
{
2364
	struct drm_i915_gem_set_domain set_domain;
2365
	struct kgem_bo *bo;
2366
 
2367
	if (size <= 4096) {
2368
		bo = list_first_entry(&kgem->pinned_batches[0],
2369
				      struct kgem_bo,
2370
				      list);
2371
		if (!bo->rq) {
2372
out_4096:
2373
			list_move_tail(&bo->list, &kgem->pinned_batches[0]);
2374
			return kgem_bo_reference(bo);
2375
		}
2376
 
2377
		if (!__kgem_busy(kgem, bo->handle)) {
2378
			assert(RQ(bo->rq)->bo == bo);
2379
			__kgem_retire_rq(kgem, RQ(bo->rq));
2380
			goto out_4096;
2381
		}
2382
	}
2383
 
2384
	if (size <= 16384) {
2385
		bo = list_first_entry(&kgem->pinned_batches[1],
2386
				      struct kgem_bo,
2387
				      list);
2388
		if (!bo->rq) {
2389
out_16384:
2390
			list_move_tail(&bo->list, &kgem->pinned_batches[1]);
2391
			return kgem_bo_reference(bo);
2392
		}
2393
 
2394
		if (!__kgem_busy(kgem, bo->handle)) {
2395
			assert(RQ(bo->rq)->bo == bo);
2396
			__kgem_retire_rq(kgem, RQ(bo->rq));
2397
			goto out_16384;
2398
		}
2399
	}
2400
 
2401
	if (kgem->gen == 020 && !kgem->has_pinned_batches) {
2402
		assert(size <= 16384);
2403
 
2404
		bo = list_first_entry(&kgem->pinned_batches[size > 4096],
2405
				      struct kgem_bo,
2406
				      list);
2407
		list_move_tail(&bo->list, &kgem->pinned_batches[size > 4096]);
2408
 
2409
		DBG(("%s: syncing due to busy batches\n", __FUNCTION__));
2410
 
2411
		VG_CLEAR(set_domain);
2412
		set_domain.handle = bo->handle;
2413
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
2414
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
2415
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
2416
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
2417
			kgem_throttle(kgem);
2418
			return NULL;
2419
		}
2420
 
2421
		kgem_retire(kgem);
2422
		assert(bo->rq == NULL);
2423
		return kgem_bo_reference(bo);
2424
	}
2425
 
2426
	return kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
2427
}
2428
 
2429
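/* Flush the accumulated batch to the kernel: finish the upload
 * buffers, allocate and write the batch bo, add it to the exec list,
 * fix up self-relocations and call EXECBUFFER2 (retrying on EBUSY).
 * On failure the GPU is marked wedged and the caches are cleaned up;
 * in all cases the kgem state is reset ready for the next batch.
 */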
void _kgem_submit(struct kgem *kgem)
2430
{
2431
	struct kgem_request *rq;
2432
	uint32_t batch_end;
2433
	int size;
2434
 
2435
	assert(!DBG_NO_HW);
2436
	assert(!kgem->wedged);
2437
 
2438
	assert(kgem->nbatch);
2439
	assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
2440
	assert(kgem->nbatch <= kgem->surface);
2441
 
2442
	batch_end = kgem_end_batch(kgem);
2443
	kgem_sna_flush(kgem);
2444
 
2445
	DBG(("batch[%d/%d]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
2446
	     kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
2447
	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));
2448
 
2449
	assert(kgem->nbatch <= kgem->batch_size);
2450
	assert(kgem->nbatch <= kgem->surface);
2451
	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
2452
	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
2453
	assert(kgem->nfence <= kgem->fence_max);
2454
 
2455
	kgem_finish_buffers(kgem);
2456
 
2457
#if SHOW_BATCH
2458
	__kgem_batch_debug(kgem, batch_end);
2459
#endif
2460
 
2461
	rq = kgem->next_request;
2462
	if (kgem->surface != kgem->batch_size)
2463
		size = compact_batch_surface(kgem);
2464
	else
2465
		size = kgem->nbatch * sizeof(kgem->batch[0]);
2466
	rq->bo = kgem_create_batch(kgem, size);
2467
	if (rq->bo) {
2468
		uint32_t handle = rq->bo->handle;
2469
		int i;
2470
 
2471
		assert(!rq->bo->needs_flush);
2472
 
2473
		i = kgem->nexec++;
2474
		kgem->exec[i].handle = handle;
2475
		kgem->exec[i].relocation_count = kgem->nreloc;
2476
		kgem->exec[i].relocs_ptr = (uintptr_t)kgem->reloc;
2477
		kgem->exec[i].alignment = 0;
2478
		kgem->exec[i].offset = rq->bo->presumed_offset;
2479
		kgem->exec[i].flags = 0;
2480
		kgem->exec[i].rsvd1 = 0;
2481
		kgem->exec[i].rsvd2 = 0;
2482
 
2483
		rq->bo->target_handle = kgem->has_handle_lut ? i : handle;
2484
		rq->bo->exec = &kgem->exec[i];
2485
		rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
2486
		list_add(&rq->bo->request, &rq->buffers);
2487
		rq->ring = kgem->ring == KGEM_BLT;
2488
 
2489
		kgem_fixup_self_relocs(kgem, rq->bo);
2490
 
2491
		if (kgem_batch_write(kgem, handle, size) == 0) {
2492
			struct drm_i915_gem_execbuffer2 execbuf;
2493
			int ret, retry = 3;
2494
 
2495
			VG_CLEAR(execbuf);
2496
			execbuf.buffers_ptr = (uintptr_t)kgem->exec;
2497
			execbuf.buffer_count = kgem->nexec;
2498
			execbuf.batch_start_offset = 0;
2499
			execbuf.batch_len = batch_end*sizeof(uint32_t);
2500
			execbuf.cliprects_ptr = 0;
2501
			execbuf.num_cliprects = 0;
2502
			execbuf.DR1 = 0;
2503
			execbuf.DR4 = 0;
2504
			execbuf.flags = kgem->ring | kgem->batch_flags;
2505
			execbuf.rsvd1 = 0;
2506
			execbuf.rsvd2 = 0;
2507
 
2508
 
2509
 
3263 Serge 2510
			ret = drmIoctl(kgem->fd,
2511
				       DRM_IOCTL_I915_GEM_EXECBUFFER2,
2512
				       &execbuf);
2513
			while (ret == -1 && errno == EBUSY && retry--) {
2514
				__kgem_throttle(kgem);
2515
				ret = drmIoctl(kgem->fd,
2516
					       DRM_IOCTL_I915_GEM_EXECBUFFER2,
2517
					       &execbuf);
2518
			}
3258 Serge 2519
			if (DEBUG_SYNC && ret == 0) {
2520
				struct drm_i915_gem_set_domain set_domain;
2521
 
2522
				VG_CLEAR(set_domain);
2523
				set_domain.handle = handle;
2524
				set_domain.read_domains = I915_GEM_DOMAIN_GTT;
2525
				set_domain.write_domain = I915_GEM_DOMAIN_GTT;
2526
 
2527
				ret = drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
2528
			}
2529
			if (ret == -1) {
2530
//				DBG(("%s: GPU hang detected [%d]\n",
2531
//				     __FUNCTION__, errno));
2532
				kgem_throttle(kgem);
2533
				kgem->wedged = true;
2534
 
2535
#if 0
2536
				ret = errno;
2537
				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
2538
				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
2539
				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);
2540
 
2541
				for (i = 0; i < kgem->nexec; i++) {
2542
					struct kgem_bo *bo, *found = NULL;
2543
 
2544
					list_for_each_entry(bo, &kgem->next_request->buffers, request) {
2545
						if (bo->handle == kgem->exec[i].handle) {
2546
							found = bo;
2547
							break;
2548
						}
2549
					}
2550
					ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
2551
					       i,
2552
					       kgem->exec[i].handle,
2553
					       (int)kgem->exec[i].offset,
2554
					       found ? kgem_bo_size(found) : -1,
2555
					       found ? found->tiling : -1,
2556
					       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
2557
					       found ? found->snoop : -1,
2558
					       found ? found->purged : -1);
2559
				}
2560
				for (i = 0; i < kgem->nreloc; i++) {
2561
					ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n",
2562
					       i,
2563
					       (int)kgem->reloc[i].offset,
2564
					       kgem->reloc[i].target_handle,
2565
					       kgem->reloc[i].delta,
2566
					       kgem->reloc[i].read_domains,
2567
					       kgem->reloc[i].write_domain,
2568
					       (int)kgem->reloc[i].presumed_offset);
2569
				}
2570
 
2571
				if (DEBUG_SYNC) {
2572
					int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
2573
					if (fd != -1) {
2574
						write(fd, kgem->batch, batch_end*sizeof(uint32_t));
2575
						close(fd);
2576
					}
2577
 
2578
					FatalError("SNA: failed to submit batchbuffer, errno=%d\n", ret);
2579
				}
2580
#endif
2581
			}
2582
		}
2583
 
2584
		kgem_commit(kgem);
2585
	}
2586
	if (kgem->wedged)
2587
		kgem_cleanup(kgem);
2588
 
2589
	kgem_reset(kgem);
2590
 
2591
	assert(kgem->next_request != NULL);
2592
}
2593
 
2594
void kgem_throttle(struct kgem *kgem)
2595
{
2596
	kgem->need_throttle = 0;
2597
	if (kgem->wedged)
2598
		return;
2599
 
2600
	kgem->wedged = __kgem_throttle(kgem);
2601
	if (kgem->wedged) {
2602
		printf("Detected a hung GPU, disabling acceleration.\n");
2603
		printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
2604
	}
2605
}
2606
 
2607
void kgem_purge_cache(struct kgem *kgem)
2608
{
2609
	struct kgem_bo *bo, *next;
2610
	int i;
2611
 
2612
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
2613
		list_for_each_entry_safe(bo, next, &kgem->inactive[i], list) {
2614
			if (!kgem_bo_is_retained(kgem, bo)) {
2615
				DBG(("%s: purging %d\n",
2616
				     __FUNCTION__, bo->handle));
2617
				kgem_bo_free(kgem, bo);
2618
			}
2619
		}
2620
	}
2621
 
2622
	kgem->need_purge = false;
2623
}
2624
 
2625
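/* Age the caches: release deferred bo/request frees, idle scanouts
 * and stale snooped buffers, then free inactive buffers that have not
 * been reused within MAX_INACTIVE_TIME (recently mapped ones are kept
 * a little longer).  Returns true while something potentially
 * expirable remains.
 */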
bool kgem_expire_cache(struct kgem *kgem)
2626
{
2627
	time_t now, expire;
2628
	struct kgem_bo *bo;
2629
	unsigned int size = 0, count = 0;
2630
	bool idle;
2631
	unsigned int i;
2632
 
2633
	time(&now);
2634
 
2635
	while (__kgem_freed_bo) {
2636
		bo = __kgem_freed_bo;
2637
		__kgem_freed_bo = *(struct kgem_bo **)bo;
2638
		free(bo);
2639
	}
2640
 
2641
	while (__kgem_freed_request) {
2642
		struct kgem_request *rq = __kgem_freed_request;
2643
		__kgem_freed_request = *(struct kgem_request **)rq;
2644
		free(rq);
2645
	}
2646
 
2647
	while (!list_is_empty(&kgem->large_inactive)) {
2648
		kgem_bo_free(kgem,
2649
			     list_first_entry(&kgem->large_inactive,
2650
					      struct kgem_bo, list));
2651
 
2652
	}
2653
 
2654
	while (!list_is_empty(&kgem->scanout)) {
2655
		bo = list_first_entry(&kgem->scanout, struct kgem_bo, list);
2656
		if (__kgem_busy(kgem, bo->handle))
2657
			break;
2658
 
2659
		list_del(&bo->list);
2660
		kgem_bo_clear_scanout(kgem, bo);
2661
		__kgem_bo_destroy(kgem, bo);
2662
	}
2663
 
2664
	expire = 0;
2665
	list_for_each_entry(bo, &kgem->snoop, list) {
2666
		if (bo->delta) {
2667
			expire = now - MAX_INACTIVE_TIME/2;
2668
			break;
2669
		}
2670
 
2671
		bo->delta = now;
2672
	}
2673
	if (expire) {
2674
		while (!list_is_empty(&kgem->snoop)) {
2675
			bo = list_last_entry(&kgem->snoop, struct kgem_bo, list);
2676
 
2677
			if (bo->delta > expire)
2678
				break;
2679
 
2680
			kgem_bo_free(kgem, bo);
2681
		}
2682
	}
2683
#ifdef DEBUG_MEMORY
2684
	{
2685
		long snoop_size = 0;
2686
		int snoop_count = 0;
2687
		list_for_each_entry(bo, &kgem->snoop, list)
2688
			snoop_count++, snoop_size += bytes(bo);
2689
		ErrorF("%s: still allocated %d bo, %ld bytes, in snoop cache\n",
2690
		       __FUNCTION__, snoop_count, snoop_size);
2691
	}
2692
#endif
2693
 
2694
	kgem_retire(kgem);
2695
	if (kgem->wedged)
2696
		kgem_cleanup(kgem);
2697
 
2698
	kgem->expire(kgem);
2699
 
2700
	if (kgem->need_purge)
2701
		kgem_purge_cache(kgem);
2702
 
2703
	expire = 0;
2704
 
2705
	idle = !kgem->need_retire;
2706
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
2707
		idle &= list_is_empty(&kgem->inactive[i]);
2708
		list_for_each_entry(bo, &kgem->inactive[i], list) {
2709
			if (bo->delta) {
2710
				expire = now - MAX_INACTIVE_TIME;
2711
				break;
2712
			}
2713
 
2714
			bo->delta = now;
2715
		}
2716
	}
2717
	if (idle) {
2718
		DBG(("%s: idle\n", __FUNCTION__));
2719
		kgem->need_expire = false;
2720
		return false;
2721
	}
2722
	if (expire == 0)
2723
		return true;
2724
 
2725
	idle = !kgem->need_retire;
2726
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
2727
		struct list preserve;
2728
 
2729
		list_init(&preserve);
2730
		while (!list_is_empty(&kgem->inactive[i])) {
2731
			bo = list_last_entry(&kgem->inactive[i],
2732
					     struct kgem_bo, list);
2733
 
2734
			if (bo->delta > expire) {
2735
				idle = false;
2736
				break;
2737
			}
2738
 
2739
			if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) {
2740
				idle = false;
2741
				list_move_tail(&bo->list, &preserve);
2742
			} else {
2743
				count++;
2744
				size += bytes(bo);
2745
				kgem_bo_free(kgem, bo);
2746
				DBG(("%s: expiring %d\n",
2747
				     __FUNCTION__, bo->handle));
2748
			}
2749
		}
2750
		if (!list_is_empty(&preserve)) {
2751
			preserve.prev->next = kgem->inactive[i].next;
2752
			kgem->inactive[i].next->prev = preserve.prev;
2753
			kgem->inactive[i].next = preserve.next;
2754
			preserve.next->prev = &kgem->inactive[i];
2755
		}
2756
	}
2757
 
2758
#ifdef DEBUG_MEMORY
2759
	{
2760
		long inactive_size = 0;
2761
		int inactive_count = 0;
2762
		for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
2763
			list_for_each_entry(bo, &kgem->inactive[i], list)
2764
				inactive_count++, inactive_size += bytes(bo);
2765
		ErrorF("%s: still allocated %d bo, %ld bytes, in inactive cache\n",
2766
		       __FUNCTION__, inactive_count, inactive_size);
2767
	}
2768
#endif
2769
 
2770
	DBG(("%s: expired %d objects, %d bytes, idle? %d\n",
2771
	     __FUNCTION__, count, size, idle));
2772
 
2773
	kgem->need_expire = !idle;
2774
	return !idle;
2775
	(void)count;
2776
	(void)size;
2777
}
2778
 
2779
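/* Tear down the caches: wait for the newest request on each ring via
 * SET_DOMAIN, retire everything and free every cached bo.
 */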
void kgem_cleanup_cache(struct kgem *kgem)
2780
{
2781
	unsigned int i;
2782
	int n;
2783
 
2784
	/* sync to the most recent request */
2785
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
2786
		if (!list_is_empty(&kgem->requests[n])) {
2787
			struct kgem_request *rq;
2788
			struct drm_i915_gem_set_domain set_domain;
2789
 
2790
			rq = list_first_entry(&kgem->requests[n],
2791
					      struct kgem_request,
2792
					      list);
2793
 
2794
			DBG(("%s: sync on cleanup\n", __FUNCTION__));
2795
 
2796
			VG_CLEAR(set_domain);
2797
			set_domain.handle = rq->bo->handle;
2798
			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
2799
			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
2800
			(void)drmIoctl(kgem->fd,
2801
				       DRM_IOCTL_I915_GEM_SET_DOMAIN,
2802
				       &set_domain);
2803
		}
2804
	}
2805
 
2806
	kgem_retire(kgem);
2807
	kgem_cleanup(kgem);
2808
 
2809
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
2810
		while (!list_is_empty(&kgem->inactive[i]))
2811
			kgem_bo_free(kgem,
2812
				     list_last_entry(&kgem->inactive[i],
2813
						     struct kgem_bo, list));
2814
	}
2815
 
2816
	while (!list_is_empty(&kgem->snoop))
2817
		kgem_bo_free(kgem,
2818
			     list_last_entry(&kgem->snoop,
2819
					     struct kgem_bo, list));
2820
 
2821
	while (__kgem_freed_bo) {
2822
		struct kgem_bo *bo = __kgem_freed_bo;
2823
		__kgem_freed_bo = *(struct kgem_bo **)bo;
2824
		free(bo);
2825
	}
2826
 
2827
	kgem->need_purge = false;
2828
	kgem->need_expire = false;
2829
}
2830
 
2831
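/* Look for a reusable untiled bo of at least num_pages in the active
 * or inactive caches (optionally restricted to CPU/GTT mappable
 * buffers), converting tiling and clearing the purgeable state as
 * required.  Returns NULL on a miss.
 */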
static struct kgem_bo *
3256 Serge 2832
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
2833
{
2834
	struct kgem_bo *bo, *first = NULL;
2835
	bool use_active = (flags & CREATE_INACTIVE) == 0;
2836
	struct list *cache;
2837
 
2838
	DBG(("%s: num_pages=%d, flags=%x, use_active? %d\n",
2839
	     __FUNCTION__, num_pages, flags, use_active));
2840
 
2841
	if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
2842
		return NULL;
2843
 
2844
	if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
2845
		DBG(("%s: inactive and cache bucket empty\n",
2846
		     __FUNCTION__));
2847
 
2848
		if (flags & CREATE_NO_RETIRE) {
2849
			DBG(("%s: can not retire\n", __FUNCTION__));
2850
			return NULL;
2851
		}
2852
 
2853
		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) {
2854
			DBG(("%s: active cache bucket empty\n", __FUNCTION__));
2855
			return NULL;
2856
		}
2857
 
2858
		if (!__kgem_throttle_retire(kgem, flags)) {
2859
			DBG(("%s: nothing retired\n", __FUNCTION__));
2860
			return NULL;
2861
		}
2862
 
2863
		if (list_is_empty(inactive(kgem, num_pages))) {
2864
			DBG(("%s: active cache bucket still empty after retire\n",
2865
			     __FUNCTION__));
2866
			return NULL;
2867
		}
2868
	}
2869
 
2870
	if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
2871
		int for_cpu = !!(flags & CREATE_CPU_MAP);
2872
		DBG(("%s: searching for inactive %s map\n",
2873
		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
2874
		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
2875
		list_for_each_entry(bo, cache, vma) {
2876
			assert(IS_CPU_MAP(bo->map) == for_cpu);
2877
			assert(bucket(bo) == cache_bucket(num_pages));
2878
			assert(bo->proxy == NULL);
2879
			assert(bo->rq == NULL);
2880
			assert(bo->exec == NULL);
2881
			assert(!bo->scanout);
2882
 
2883
			if (num_pages > num_pages(bo)) {
2884
				DBG(("inactive too small: %d < %d\n",
2885
				     num_pages(bo), num_pages));
2886
				continue;
2887
			}
2888
 
2889
			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
2890
				kgem_bo_free(kgem, bo);
2891
				break;
2892
			}
2893
 
2894
			if (I915_TILING_NONE != bo->tiling &&
2895
			    !gem_set_tiling(kgem->fd, bo->handle,
2896
					    I915_TILING_NONE, 0))
2897
				continue;
2898
 
2899
			kgem_bo_remove_from_inactive(kgem, bo);
2900
 
2901
			bo->tiling = I915_TILING_NONE;
2902
			bo->pitch = 0;
2903
			bo->delta = 0;
2904
			DBG(("  %s: found handle=%d (num_pages=%d) in linear vma cache\n",
2905
			     __FUNCTION__, bo->handle, num_pages(bo)));
2906
			assert(use_active || bo->domain != DOMAIN_GPU);
2907
			assert(!bo->needs_flush);
2908
			ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
2909
			return bo;
2910
		}
2911
 
2912
		if (flags & CREATE_EXACT)
2913
			return NULL;
2914
 
2915
		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
2916
			return NULL;
2917
	}
2918
 
2919
	cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages);
2920
	list_for_each_entry(bo, cache, list) {
2921
		assert(bo->refcnt == 0);
2922
		assert(bo->reusable);
2923
		assert(!!bo->rq == !!use_active);
2924
		assert(bo->proxy == NULL);
2925
		assert(!bo->scanout);
2926
 
2927
		if (num_pages > num_pages(bo))
2928
			continue;
2929
 
2930
		if (use_active &&
2931
		    kgem->gen <= 040 &&
2932
		    bo->tiling != I915_TILING_NONE)
2933
			continue;
2934
 
2935
		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
2936
			kgem_bo_free(kgem, bo);
2937
			break;
2938
		}
2939
 
2940
		if (I915_TILING_NONE != bo->tiling) {
2941
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP))
2942
				continue;
2943
 
2944
			if (first)
2945
				continue;
2946
 
2947
			if (!gem_set_tiling(kgem->fd, bo->handle,
2948
					    I915_TILING_NONE, 0))
2949
				continue;
2950
 
2951
			bo->tiling = I915_TILING_NONE;
2952
			bo->pitch = 0;
2953
		}
2954
 
2955
		if (bo->map) {
2956
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
2957
				int for_cpu = !!(flags & CREATE_CPU_MAP);
2958
				if (IS_CPU_MAP(bo->map) != for_cpu) {
2959
					if (first != NULL)
2960
						break;
2961
 
2962
					first = bo;
2963
					continue;
2964
				}
2965
			} else {
2966
				if (first != NULL)
2967
					break;
2968
 
2969
				first = bo;
2970
				continue;
2971
			}
2972
		} else {
2973
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
2974
				if (first != NULL)
2975
					break;
2976
 
2977
				first = bo;
2978
				continue;
2979
			}
2980
		}
2981
 
2982
		if (use_active)
2983
			kgem_bo_remove_from_active(kgem, bo);
2984
		else
2985
			kgem_bo_remove_from_inactive(kgem, bo);
2986
 
2987
		assert(bo->tiling == I915_TILING_NONE);
2988
		bo->pitch = 0;
2989
		bo->delta = 0;
2990
		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
2991
		     __FUNCTION__, bo->handle, num_pages(bo),
2992
		     use_active ? "active" : "inactive"));
2993
		assert(list_is_empty(&bo->list));
2994
		assert(use_active || bo->domain != DOMAIN_GPU);
2995
		assert(!bo->needs_flush || use_active);
2996
		ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
2997
		return bo;
2998
	}
2999
 
3000
	if (first) {
3001
		assert(first->tiling == I915_TILING_NONE);
3002
 
3003
		if (use_active)
3004
			kgem_bo_remove_from_active(kgem, first);
3005
		else
3006
			kgem_bo_remove_from_inactive(kgem, first);
3007
 
3008
		first->pitch = 0;
3009
		first->delta = 0;
3010
		DBG(("  %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
3011
		     __FUNCTION__, first->handle, num_pages(first),
3012
		     use_active ? "active" : "inactive"));
3013
		assert(list_is_empty(&first->list));
3014
		assert(use_active || first->domain != DOMAIN_GPU);
3015
		assert(!first->needs_flush || use_active);
3016
		ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active);
3017
		return first;
3018
	}
3019
 
3020
	return NULL;
3021
}
3022
 
3023
 
3024
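/* Allocate (or recover from the inactive cache) an untiled bo of at
 * least 'size' bytes.  Illustrative use, mirroring how callers
 * elsewhere in sna use this helper (error handling elided):
 *
 *	bo = kgem_create_linear(kgem, 4096, CREATE_GTT_MAP);
 *	if (bo != NULL)
 *		kgem_bo_destroy(kgem, bo);
 */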
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
3025
{
3026
	struct kgem_bo *bo;
3027
	uint32_t handle;
3028
 
3029
	DBG(("%s(%d)\n", __FUNCTION__, size));
3030
 
3031
	if (flags & CREATE_GTT_MAP && kgem->has_llc) {
3032
		flags &= ~CREATE_GTT_MAP;
3033
		flags |= CREATE_CPU_MAP;
3034
	}
3035
 
3036
	size = (size + PAGE_SIZE - 1) / PAGE_SIZE;
3037
	bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags);
3038
	if (bo) {
3039
		assert(bo->domain != DOMAIN_GPU);
3040
		ASSERT_IDLE(kgem, bo->handle);
3041
		bo->refcnt = 1;
3042
		return bo;
3043
	}
3044
 
3045
	if (flags & CREATE_CACHED)
3046
		return NULL;
3047
 
3048
	handle = gem_create(kgem->fd, size);
3049
	if (handle == 0)
3050
		return NULL;
3051
 
3052
	DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size));
3053
	bo = __kgem_bo_alloc(handle, size);
3054
	if (bo == NULL) {
3055
		gem_close(kgem->fd, handle);
3056
		return NULL;
3057
	}
3058
 
3059
	debug_alloc__bo(kgem, bo);
3060
	return bo;
3061
}
3062
 
3258 Serge 3063
inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
3064
{
3065
	unsigned int size;
3256 Serge 3066
 
3258 Serge 3067
	assert(bo->tiling);
3068
	assert(kgem->gen < 040);
3256 Serge 3069
 
3258 Serge 3070
	if (kgem->gen < 030)
3071
		size = 512 * 1024;
3072
	else
3073
		size = 1024 * 1024;
3074
	while (size < bytes(bo))
3075
		size *= 2;
3256 Serge 3076
 
3258 Serge 3077
	return size;
3078
}
3256 Serge 3079
 
3258 Serge 3080
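/* Allocate a 2D buffer: compute the surface size and pitch for the
 * requested tiling, then try the scanout, large, mappable-inactive,
 * active and inactive caches in turn before finally creating a new
 * GEM object.
 */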
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
3081
			       int width,
3082
			       int height,
3083
			       int bpp,
3084
			       int tiling,
3085
			       uint32_t flags)
3086
{
3087
	struct list *cache;
3088
	struct kgem_bo *bo;
3089
	uint32_t pitch, untiled_pitch, tiled_height, size;
3090
	uint32_t handle;
3091
	int i, bucket, retry;
3092
 
3093
	if (tiling < 0)
3094
		tiling = -tiling, flags |= CREATE_EXACT;
3095
 
3096
	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
3097
	     width, height, bpp, tiling,
3098
	     !!(flags & CREATE_EXACT),
3099
	     !!(flags & CREATE_INACTIVE),
3100
	     !!(flags & CREATE_CPU_MAP),
3101
	     !!(flags & CREATE_GTT_MAP),
3102
	     !!(flags & CREATE_SCANOUT),
3103
	     !!(flags & CREATE_PRIME),
3104
	     !!(flags & CREATE_TEMPORARY)));
3105
 
3106
	size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
3107
				 width, height, bpp, tiling, &pitch);
3108
	assert(size && size <= kgem->max_object_size);
3109
	size /= PAGE_SIZE;
3110
	bucket = cache_bucket(size);
3111
 
3112
	if (flags & CREATE_SCANOUT) {
3113
		assert((flags & CREATE_INACTIVE) == 0);
3114
		list_for_each_entry_reverse(bo, &kgem->scanout, list) {
3115
			assert(bo->scanout);
3116
			assert(bo->delta);
3117
			assert(!bo->purged);
3118
 
3119
			if (size > num_pages(bo) || num_pages(bo) > 2*size)
3120
				continue;
3121
 
3122
			if (bo->tiling != tiling ||
3123
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3124
				if (!gem_set_tiling(kgem->fd, bo->handle,
3125
						    tiling, pitch))
3126
					continue;
3127
 
3128
				bo->tiling = tiling;
3129
				bo->pitch = pitch;
3130
			}
3131
 
3132
			list_del(&bo->list);
3133
 
3134
			bo->unique_id = kgem_get_unique_id(kgem);
3135
			DBG(("  1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3136
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3137
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3138
			bo->refcnt = 1;
3139
			return bo;
3140
		}
3141
	}
3142
 
3143
	if (bucket >= NUM_CACHE_BUCKETS) {
3144
		DBG(("%s: large bo num pages=%d, bucket=%d\n",
3145
		     __FUNCTION__, size, bucket));
3146
 
3147
		if (flags & CREATE_INACTIVE)
3148
			goto large_inactive;
3149
 
3150
		tiled_height = kgem_aligned_height(kgem, height, tiling);
3151
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
3152
 
3153
		list_for_each_entry(bo, &kgem->large, list) {
3154
			assert(!bo->purged);
3155
			assert(!bo->scanout);
3156
			assert(bo->refcnt == 0);
3157
			assert(bo->reusable);
3158
			assert(bo->flush == true);
3159
 
3160
			if (kgem->gen < 040) {
3161
				if (bo->pitch < pitch) {
3162
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
3163
					     bo->tiling, tiling,
3164
					     bo->pitch, pitch));
3165
					continue;
3166
				}
3167
 
3168
				if (bo->pitch * tiled_height > bytes(bo))
3169
					continue;
3170
			} else {
3171
				if (num_pages(bo) < size)
3172
					continue;
3173
 
3174
				if (bo->pitch != pitch || bo->tiling != tiling) {
3175
					if (!gem_set_tiling(kgem->fd, bo->handle,
3176
							    tiling, pitch))
3177
						continue;
3178
 
3179
					bo->pitch = pitch;
3180
					bo->tiling = tiling;
3181
				}
3182
			}
3183
 
3184
			kgem_bo_remove_from_active(kgem, bo);
3185
 
3186
			bo->unique_id = kgem_get_unique_id(kgem);
3187
			bo->delta = 0;
3188
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3189
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3190
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3191
			bo->refcnt = 1;
3192
			return bo;
3193
		}
3194
 
3195
large_inactive:
3196
		list_for_each_entry(bo, &kgem->large_inactive, list) {
3197
			assert(bo->refcnt == 0);
3198
			assert(bo->reusable);
3199
			assert(!bo->scanout);
3200
 
3201
			if (size > num_pages(bo))
3202
				continue;
3203
 
3204
			if (bo->tiling != tiling ||
3205
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3206
				if (!gem_set_tiling(kgem->fd, bo->handle,
3207
						    tiling, pitch))
3208
					continue;
3209
 
3210
				bo->tiling = tiling;
3211
				bo->pitch = pitch;
3212
			}
3213
 
3214
			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
3215
				kgem_bo_free(kgem, bo);
3216
				break;
3217
			}
3218
 
3219
			list_del(&bo->list);
3220
 
3221
			bo->unique_id = kgem_get_unique_id(kgem);
3222
			bo->pitch = pitch;
3223
			bo->delta = 0;
3224
			DBG(("  1:from large inactive: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3225
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3226
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3227
			bo->refcnt = 1;
3228
			return bo;
3229
		}
3230
 
3231
		goto create;
3232
	}
3233
 
3234
	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
3235
		int for_cpu = !!(flags & CREATE_CPU_MAP);
3236
		if (kgem->has_llc && tiling == I915_TILING_NONE)
3237
			for_cpu = 1;
3238
		/* We presume that we will need to upload to this bo,
3239
		 * and so would prefer to have an active VMA.
3240
		 */
3241
		cache = &kgem->vma[for_cpu].inactive[bucket];
3242
		do {
3243
			list_for_each_entry(bo, cache, vma) {
3244
				assert(bucket(bo) == bucket);
3245
				assert(bo->refcnt == 0);
3246
				assert(!bo->scanout);
3247
				assert(bo->map);
3248
				assert(IS_CPU_MAP(bo->map) == for_cpu);
3249
				assert(bo->rq == NULL);
3250
				assert(list_is_empty(&bo->request));
3251
				assert(bo->flush == false);
3252
 
3253
				if (size > num_pages(bo)) {
3254
					DBG(("inactive too small: %d < %d\n",
3255
					     num_pages(bo), size));
3256
					continue;
3257
				}
3258
 
3259
				if (bo->tiling != tiling ||
3260
				    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3261
					DBG(("inactive vma with wrong tiling: %d < %d\n",
3262
					     bo->tiling, tiling));
3263
					continue;
3264
				}
3265
 
3266
				if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
3267
					kgem_bo_free(kgem, bo);
3268
					break;
3269
				}
3270
 
3271
				bo->pitch = pitch;
3272
				bo->delta = 0;
3273
				bo->unique_id = kgem_get_unique_id(kgem);
3274
 
3275
				kgem_bo_remove_from_inactive(kgem, bo);
3276
 
3277
				DBG(("  from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n",
3278
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3279
				assert(bo->reusable);
3280
				assert(bo->domain != DOMAIN_GPU);
3281
				ASSERT_IDLE(kgem, bo->handle);
3282
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3283
				bo->refcnt = 1;
3284
				return bo;
3285
			}
3286
		} while (!list_is_empty(cache) &&
3287
			 __kgem_throttle_retire(kgem, flags));
3288
 
3289
		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
3290
			goto create;
3291
	}
3292
 
3293
	if (flags & CREATE_INACTIVE)
3294
		goto skip_active_search;
3295
 
3296
	/* Best active match */
3297
	retry = NUM_CACHE_BUCKETS - bucket;
3298
	if (retry > 3 && (flags & CREATE_TEMPORARY) == 0)
3299
		retry = 3;
3300
search_again:
3301
	assert(bucket < NUM_CACHE_BUCKETS);
3302
	cache = &kgem->active[bucket][tiling];
3303
	if (tiling) {
3304
		tiled_height = kgem_aligned_height(kgem, height, tiling);
3305
		list_for_each_entry(bo, cache, list) {
3306
			assert(!bo->purged);
3307
			assert(bo->refcnt == 0);
3308
			assert(bucket(bo) == bucket);
3309
			assert(bo->reusable);
3310
			assert(bo->tiling == tiling);
3311
			assert(bo->flush == false);
3312
			assert(!bo->scanout);
3313
 
3314
			if (kgem->gen < 040) {
3315
				if (bo->pitch < pitch) {
3316
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
3317
					     bo->tiling, tiling,
3318
					     bo->pitch, pitch));
3319
					continue;
3320
				}
3321
 
3322
				if (bo->pitch * tiled_height > bytes(bo))
3323
					continue;
3324
			} else {
3325
				if (num_pages(bo) < size)
3326
					continue;
3327
 
3328
				if (bo->pitch != pitch) {
3329
					if (!gem_set_tiling(kgem->fd,
3330
							    bo->handle,
3331
							    tiling, pitch))
3332
						continue;
3333
 
3334
					bo->pitch = pitch;
3335
				}
3336
			}
3337
 
3338
			kgem_bo_remove_from_active(kgem, bo);
3339
 
3340
			bo->unique_id = kgem_get_unique_id(kgem);
3341
			bo->delta = 0;
3342
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3343
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3344
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3345
			bo->refcnt = 1;
3346
			return bo;
3347
		}
3348
	} else {
3349
		list_for_each_entry(bo, cache, list) {
3350
			assert(bucket(bo) == bucket);
3351
			assert(!bo->purged);
3352
			assert(bo->refcnt == 0);
3353
			assert(bo->reusable);
3354
			assert(!bo->scanout);
3355
			assert(bo->tiling == tiling);
3356
			assert(bo->flush == false);
3357
 
3358
			if (num_pages(bo) < size)
3359
				continue;
3360
 
3361
			kgem_bo_remove_from_active(kgem, bo);
3362
 
3363
			bo->pitch = pitch;
3364
			bo->unique_id = kgem_get_unique_id(kgem);
3365
			bo->delta = 0;
3366
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3367
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3368
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3369
			bo->refcnt = 1;
3370
			return bo;
3371
		}
3372
	}
3373
 
3374
	if (--retry && flags & CREATE_EXACT) {
3375
		if (kgem->gen >= 040) {
3376
			for (i = I915_TILING_NONE; i <= I915_TILING_Y; i++) {
3377
				if (i == tiling)
3378
					continue;
3379
 
3380
				cache = &kgem->active[bucket][i];
3381
				list_for_each_entry(bo, cache, list) {
3382
					assert(!bo->purged);
3383
					assert(bo->refcnt == 0);
3384
					assert(bo->reusable);
3385
					assert(!bo->scanout);
3386
					assert(bo->flush == false);
3387
 
3388
					if (num_pages(bo) < size)
3389
						continue;
3390
 
3391
					if (!gem_set_tiling(kgem->fd,
3392
							    bo->handle,
3393
							    tiling, pitch))
3394
						continue;
3395
 
3396
					kgem_bo_remove_from_active(kgem, bo);
3397
 
3398
					bo->unique_id = kgem_get_unique_id(kgem);
3399
					bo->pitch = pitch;
3400
					bo->tiling = tiling;
3401
					bo->delta = 0;
3402
					DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3403
					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3404
					assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3405
					bo->refcnt = 1;
3406
					return bo;
3407
				}
3408
			}
3409
		}
3410
 
3411
		bucket++;
3412
		goto search_again;
3413
	}
3414
 
3415
	if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
3416
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
3417
		i = tiling;
3418
		while (--i >= 0) {
3419
			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
3420
							 width, height, bpp, tiling, &pitch);
3421
			cache = active(kgem, tiled_height / PAGE_SIZE, i);
3422
			tiled_height = kgem_aligned_height(kgem, height, i);
3423
			list_for_each_entry(bo, cache, list) {
3424
				assert(!bo->purged);
3425
				assert(bo->refcnt == 0);
3426
				assert(bo->reusable);
3427
				assert(!bo->scanout);
3428
				assert(bo->flush == false);
3429
 
3430
				if (bo->tiling) {
3431
					if (bo->pitch < pitch) {
3432
						DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
3433
						     bo->tiling, tiling,
3434
						     bo->pitch, pitch));
3435
						continue;
3436
					}
3437
				} else
3438
					bo->pitch = untiled_pitch;
3439
 
3440
				if (bo->pitch * tiled_height > bytes(bo))
3441
					continue;
3442
 
3443
				kgem_bo_remove_from_active(kgem, bo);
3444
 
3445
				bo->unique_id = kgem_get_unique_id(kgem);
3446
				bo->delta = 0;
3447
				DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3448
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3449
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3450
				bo->refcnt = 1;
3451
				return bo;
3452
			}
3453
		}
3454
	}
3455
 
3456
skip_active_search:
3457
	bucket = cache_bucket(size);
3458
	retry = NUM_CACHE_BUCKETS - bucket;
3459
	if (retry > 3)
3460
		retry = 3;
3461
search_inactive:
3462
	/* Now just look for a close match and prefer any currently active */
3463
	assert(bucket < NUM_CACHE_BUCKETS);
3464
	cache = &kgem->inactive[bucket];
3465
	list_for_each_entry(bo, cache, list) {
3466
		assert(bucket(bo) == bucket);
3467
		assert(bo->reusable);
3468
		assert(!bo->scanout);
3469
		assert(bo->flush == false);
3470
 
3471
		if (size > num_pages(bo)) {
3472
			DBG(("inactive too small: %d < %d\n",
3473
			     num_pages(bo), size));
3474
			continue;
3475
		}
3476
 
3477
		if (bo->tiling != tiling ||
3478
		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3479
			if (!gem_set_tiling(kgem->fd, bo->handle,
3480
					    tiling, pitch))
3481
				continue;
3482
 
3483
			if (bo->map)
3484
				kgem_bo_release_map(kgem, bo);
3485
		}
3486
 
3487
		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
3488
			kgem_bo_free(kgem, bo);
3489
			break;
3490
		}
3491
 
3492
		kgem_bo_remove_from_inactive(kgem, bo);
3493
 
3494
		bo->pitch = pitch;
3495
		bo->tiling = tiling;
3496
 
3497
		bo->delta = 0;
3498
		bo->unique_id = kgem_get_unique_id(kgem);
3499
		assert(bo->pitch);
3500
		DBG(("  from inactive: pitch=%d, tiling=%d: handle=%d, id=%d\n",
3501
		     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3502
		assert(bo->refcnt == 0);
3503
		assert(bo->reusable);
3504
		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
3505
		ASSERT_MAYBE_IDLE(kgem, bo->handle, flags & CREATE_INACTIVE);
3506
		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3507
		bo->refcnt = 1;
3508
		return bo;
3509
	}
3510
 
3511
	if (flags & CREATE_INACTIVE &&
3512
	    !list_is_empty(&kgem->active[bucket][tiling]) &&
3513
	    __kgem_throttle_retire(kgem, flags)) {
3514
		flags &= ~CREATE_INACTIVE;
3515
		goto search_inactive;
3516
	}
3517
 
3518
	if (--retry) {
3519
		bucket++;
3520
		flags &= ~CREATE_INACTIVE;
3521
		goto search_inactive;
3522
	}
3523
 
3524
create:
3525
	if (bucket >= NUM_CACHE_BUCKETS)
3526
		size = ALIGN(size, 1024);
3527
	handle = gem_create(kgem->fd, size);
3528
	if (handle == 0)
3529
		return NULL;
3530
 
3531
	bo = __kgem_bo_alloc(handle, size);
3532
	if (!bo) {
3533
		gem_close(kgem->fd, handle);
3534
		return NULL;
3535
	}
3536
 
3537
	bo->domain = DOMAIN_CPU;
3538
	bo->unique_id = kgem_get_unique_id(kgem);
3539
	bo->pitch = pitch;
3540
	if (tiling != I915_TILING_NONE &&
3541
	    gem_set_tiling(kgem->fd, handle, tiling, pitch))
3542
		bo->tiling = tiling;
3543
	if (bucket >= NUM_CACHE_BUCKETS) {
3544
		DBG(("%s: marking large bo for automatic flushing\n",
3545
		     __FUNCTION__));
3546
		bo->flush = true;
3547
	}
3548
 
3549
	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
3550
 
3551
	debug_alloc__bo(kgem, bo);
3552
 
3553
	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
3554
	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
3555
	     size, num_pages(bo), bucket(bo)));
3556
	return bo;
3557
}
3558
 
3263 Serge 3559
#if 0
3258 Serge 3560
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
3561
				   int width,
3562
				   int height,
3563
				   int bpp,
3564
				   uint32_t flags)
3565
{
3566
	struct kgem_bo *bo;
3567
	int stride, size;
3568
 
3569
	if (DBG_NO_CPU)
3570
		return NULL;
3571
 
3572
	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));
3573
 
3574
	if (kgem->has_llc) {
3575
		bo = kgem_create_2d(kgem, width, height, bpp,
3576
				    I915_TILING_NONE, flags);
3577
		if (bo == NULL)
3578
			return bo;
3579
 
3580
		assert(bo->tiling == I915_TILING_NONE);
3581
 
3582
		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
3583
			kgem_bo_destroy(kgem, bo);
3584
			return NULL;
3585
		}
3586
 
3587
		return bo;
3588
	}
3589
 
3590
	assert(width > 0 && height > 0);
3591
	stride = ALIGN(width, 2) * bpp >> 3;
3592
	stride = ALIGN(stride, 4);
3593
	size = stride * ALIGN(height, 2);
3594
	assert(size >= PAGE_SIZE);
3595
 
3596
	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
3597
	     __FUNCTION__, width, height, bpp, stride));
3598
 
3599
	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
3600
	if (bo) {
3601
		assert(bo->tiling == I915_TILING_NONE);
3602
		assert(bo->snoop);
3603
		bo->refcnt = 1;
3604
		bo->pitch = stride;
3605
		bo->unique_id = kgem_get_unique_id(kgem);
3606
		return bo;
3607
	}
3608
 
3609
	if (kgem->has_cacheing) {
3610
		bo = kgem_create_linear(kgem, size, flags);
3611
		if (bo == NULL)
3612
			return NULL;
3613
 
3614
		assert(bo->tiling == I915_TILING_NONE);
3615
 
3616
		if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED)) {
3617
			kgem_bo_destroy(kgem, bo);
3618
			return NULL;
3619
		}
3620
		bo->snoop = true;
3621
 
3622
		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
3623
			kgem_bo_destroy(kgem, bo);
3624
			return NULL;
3625
		}
3626
 
3627
		bo->pitch = stride;
3628
		bo->unique_id = kgem_get_unique_id(kgem);
3629
		return bo;
3630
	}
3631
 
3632
	if (kgem->has_userptr) {
3633
		void *ptr;
3634
 
3635
		/* XXX */
3636
		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
3637
		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
3638
			return NULL;
3639
 
3640
		bo = kgem_create_map(kgem, ptr, size, false);
3641
		if (bo == NULL) {
3642
			free(ptr);
3643
			return NULL;
3644
		}
3645
 
3646
		bo->map = MAKE_USER_MAP(ptr);
3647
		bo->pitch = stride;
3648
		bo->unique_id = kgem_get_unique_id(kgem);
3649
		return bo;
3650
	}
3651
 
3652
		return NULL;
3653
}
3654
 
3655
 
3656
#endif
3657
 
3658
 
3659
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
3660
{
3661
	DBG(("%s: handle=%d, proxy? %d\n",
3662
	     __FUNCTION__, bo->handle, bo->proxy != NULL));
3663
 
3664
	if (bo->proxy) {
3665
		_list_del(&bo->vma);
3666
		_list_del(&bo->request);
3667
		if (bo->io && bo->exec == NULL)
3668
			_kgem_bo_delete_buffer(kgem, bo);
3669
		kgem_bo_unref(kgem, bo->proxy);
3670
		kgem_bo_binding_free(kgem, bo);
3671
		free(bo);
3672
		return;
3673
	}
3674
 
3675
	__kgem_bo_destroy(kgem, bo);
3676
}
3677
 
3263 Serge 3678
void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
3679
{
3680
	assert(bo->rq);
3681
	assert(bo->exec == NULL);
3682
	assert(bo->needs_flush);
3258 Serge 3683
 
3263 Serge 3684
	/* The kernel will emit a flush *and* update its own flushing lists. */
3685
	if (!__kgem_busy(kgem, bo->handle))
3686
		__kgem_bo_clear_busy(bo);
3258 Serge 3687
 
3263 Serge 3688
	DBG(("%s: handle=%d, busy?=%d\n",
3689
	     __FUNCTION__, bo->handle, bo->rq != NULL));
3690
}
3258 Serge 3691
 
3263 Serge 3692
inline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
3693
{
3694
	return kgem->nreloc && bo->rq && RQ_RING(bo->rq) != kgem->ring;
3695
}
3258 Serge 3696
 
3263 Serge 3697
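/* Check whether the given bo(s) can be added to the current batch
 * without exceeding the aperture watermarks, running out of exec
 * slots or forcing a cross-ring semaphore.  Typical (illustrative)
 * caller pattern, with dst_bo/src_bo standing in for the caller's
 * buffers:
 *
 *	if (!kgem_check_bo(kgem, dst_bo, src_bo, NULL))
 *		_kgem_submit(kgem);
 */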
bool kgem_check_bo(struct kgem *kgem, ...)
{
	va_list ap;
	struct kgem_bo *bo;
	int num_exec = 0;
	int num_pages = 0;
	bool flush = false;

	va_start(ap, kgem);
	while ((bo = va_arg(ap, struct kgem_bo *))) {
		while (bo->proxy)
			bo = bo->proxy;
		if (bo->exec)
			continue;

		if (needs_semaphore(kgem, bo))
			return false;

		num_pages += num_pages(bo);
		num_exec++;

		flush |= bo->flush;
	}
	va_end(ap);

	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
	     __FUNCTION__, num_pages, num_exec));

	if (!num_pages)
		return true;

	if (kgem_flush(kgem, flush))
		return false;

	if (kgem->aperture > kgem->aperture_low &&
	    kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: current aperture usage (%d) is greater than low water mark (%d)\n",
		     __FUNCTION__, kgem->aperture, kgem->aperture_low));
		return false;
	}

	if (num_pages + kgem->aperture > kgem->aperture_high) {
		DBG(("%s: final aperture usage (%d) is greater than high water mark (%d)\n",
		     __FUNCTION__, num_pages + kgem->aperture, kgem->aperture_high));
		return false;
	}

	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) {
		DBG(("%s: out of exec slots (%d + %d / %d)\n", __FUNCTION__,
		     kgem->nexec, num_exec, KGEM_EXEC_SIZE(kgem)));
		return false;
	}

	return true;
}


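/* Emit a relocation for the batch dword at 'pos' targeting 'bo' at 'delta',
 * adding the bo (and any proxy chain) to the batch as needed and accounting
 * for fence usage on pre-gen4. The KolibriOS scanout bo (handle == -2, see
 * kgem_init_fb()) is not a GEM object, so it is only marked dirty and no
 * relocation is recorded. Returns the value to write into the batch.
 */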
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domain,
			uint32_t delta)
{
	int index;

	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));

	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);

	if (bo != NULL && bo->handle == -2) {
		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);

		if (read_write_domain & 0x7fff && !bo->dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}
		return 0;
	}

	index = kgem->nreloc++;
	assert(index < ARRAY_SIZE(kgem->reloc));
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
	if (bo) {
		assert(bo->refcnt);
		assert(!bo->purged);

		while (bo->proxy) {
			DBG(("%s: adding proxy [delta=%d] for handle=%d\n",
			     __FUNCTION__, bo->delta, bo->handle));
			delta += bo->delta;
			assert(bo->handle == bo->proxy->handle);
			/* need to release the cache upon batch submit */
			if (bo->exec == NULL) {
				list_move_tail(&bo->request,
					       &kgem->next_request->buffers);
				bo->rq = MAKE_REQUEST(kgem->next_request,
						      kgem->ring);
				bo->exec = &_kgem_dummy_exec;
			}

			if (read_write_domain & 0x7fff && !bo->dirty)
				__kgem_bo_mark_dirty(bo);

			bo = bo->proxy;
			assert(bo->refcnt);
			assert(!bo->purged);
		}

		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);
		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
		assert(RQ_RING(bo->rq) == kgem->ring);

		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
			if (bo->tiling &&
			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
				assert(kgem->nfence < kgem->fence_max);
				kgem->aperture_fenced +=
					kgem_bo_fenced_size(kgem, bo);
				kgem->nfence++;
			}
			bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
		}

		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = bo->target_handle;
		kgem->reloc[index].presumed_offset = bo->presumed_offset;

		if (read_write_domain & 0x7fff && !bo->dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}

		delta += bo->presumed_offset;
	} else {
		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = ~0U;
		kgem->reloc[index].presumed_offset = 0;
		if (kgem->nreloc__self < 256)
			kgem->reloc__self[kgem->nreloc__self++] = index;
	}
	kgem->reloc[index].read_domains = read_write_domain >> 16;
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;

	return delta;
}

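/* Evict inactive cached mappings of the given type (CPU or GTT) so that we
 * stay well below the per-process vma limit described below; bos whose
 * pages cannot be marked purgeable are freed outright.
 */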
static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
{
	int i, j;

	DBG(("%s: type=%d, count=%d (bucket: %d)\n",
	     __FUNCTION__, type, kgem->vma[type].count, bucket));
	if (kgem->vma[type].count <= 0)
		return;

	if (kgem->need_purge)
		kgem_purge_cache(kgem);

	/* vma are limited on a per-process basis to around 64k.
	 * This includes all malloc arenas as well as other file
	 * mappings. In order to be fair and not hog the cache,
	 * and more importantly not to exhaust that limit and to
	 * start failing mappings, we keep our own number of open
	 * vma to within a conservative value.
	 */
	i = 0;
	while (kgem->vma[type].count > 0) {
		struct kgem_bo *bo = NULL;

		for (j = 0;
		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
		     j++) {
			struct list *head = &kgem->vma[type].inactive[i++%ARRAY_SIZE(kgem->vma[type].inactive)];
			if (!list_is_empty(head))
				bo = list_last_entry(head, struct kgem_bo, vma);
		}
		if (bo == NULL)
			break;

		DBG(("%s: discarding inactive %s vma cache for %d\n",
		     __FUNCTION__,
		     IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle));
		assert(IS_CPU_MAP(bo->map) == type);
		assert(bo->map);
		assert(bo->rq == NULL);

		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
//		munmap(MAP(bo->map), bytes(bo));
		bo->map = NULL;
		list_del(&bo->vma);
		kgem->vma[type].count--;

		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo)) {
			DBG(("%s: freeing unpurgeable old mapping\n",
			     __FUNCTION__));
			kgem_bo_free(kgem, bo);
		}
	}
}


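/* Map a bo for direct access. Linear, non-scanout bos on LLC systems (or
 * already in the CPU domain) are given a CPU mapping; otherwise a GTT
 * mapping is created, cached on the bo, and the bo is moved to the GTT
 * domain for coherent access.
 */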
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->proxy == NULL);
	assert(list_is_empty(&bo->list));
	assert(bo->exec == NULL);

	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
		DBG(("%s: converting request for GTT map into CPU map\n",
		     __FUNCTION__));
		ptr = kgem_bo_map__cpu(kgem, bo);
		kgem_bo_sync__cpu(kgem, bo);
		return ptr;
	}

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	if (bo->domain != DOMAIN_GTT) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		/* XXX use PROT_READ to avoid the write flush? */

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_GTT;
		}
	}

	return ptr;
}

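/* Map a bo through the GTT without touching its domain; the mapping is
 * cached on the bo to avoid paying for the slow GTT pagefault again.
 */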
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->list));

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(bytes(bo) <= kgem->aperture_mappable / 4);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	return ptr;
}


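/* Create (or reuse) a CPU mmap of the bo, retiring requests and expiring
 * the caches and retrying if the initial mmap attempt fails.
 */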
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap mmap_arg;

	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
	assert(!bo->purged);
	assert(list_is_empty(&bo->list));
	assert(!bo->scanout);
	assert(bo->proxy == NULL);

	if (IS_CPU_MAP(bo->map))
		return MAP(bo->map);

	if (bo->map)
		kgem_bo_release_map(kgem, bo);

	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));

retry:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	mmap_arg.offset = 0;
	mmap_arg.size = bytes(bo);
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
		printf("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
		       __FUNCTION__, bo->handle, bytes(bo), 0);
		if (__kgem_throttle_retire(kgem, 0))
			goto retry;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry;
		}

		return NULL;
	}

	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));

	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}

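/* Flush the bo through kgem_bo_submit() and move it to the CPU domain so
 * that subsequent CPU reads and writes through its mapping are coherent.
 */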
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	kgem_bo_submit(kgem, bo);

	if (bo->domain != DOMAIN_CPU) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: SYNC: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;

		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_CPU;
		}
	}
}

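/* Clear the dirty flag on the buffers of the next request; the walk stops
 * at the first clean entry, relying on dirty bos being kept at the head of
 * the list.
 */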
void kgem_clear_dirty(struct kgem *kgem)
{
	struct list * const buffers = &kgem->next_request->buffers;
	struct kgem_bo *bo;

	list_for_each_entry(bo, buffers, request) {
		if (!bo->dirty)
			break;

		bo->dirty = false;
	}
}

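/* Create a proxy bo aliasing the range [offset, offset + length) of the
 * target, sharing its handle and inheriting tiling, pitch and any pending
 * request state.
 */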
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length)
{
	struct kgem_bo *bo;

	DBG(("%s: target handle=%d [proxy? %d], offset=%d, length=%d, io=%d\n",
	     __FUNCTION__, target->handle, target->proxy ? target->proxy->delta : -1,
	     offset, length, target->io));

	bo = __kgem_bo_alloc(target->handle, length);
	if (bo == NULL)
		return NULL;

	bo->unique_id = kgem_get_unique_id(kgem);
	bo->reusable = false;
	bo->size.bytes = length;

	bo->io = target->io && target->proxy == NULL;
	bo->dirty = target->dirty;
	bo->tiling = target->tiling;
	bo->pitch = target->pitch;

	assert(!bo->scanout);
	bo->proxy = kgem_bo_reference(target);
	bo->delta = offset;

	if (target->exec) {
		list_move_tail(&bo->request, &kgem->next_request->buffers);
		bo->exec = &_kgem_dummy_exec;
	}
	bo->rq = target->rq;

	return bo;
}

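/* Small per-bo cache mapping a format to a previously recorded binding
 * offset, so repeated lookups for the same format can reuse it.
 */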
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b && b->offset; b = b->next)
		if (format == b->format)
			return b->offset;

	return 0;
}

void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b; b = b->next) {
		if (b->offset)
			continue;

		b->offset = offset;
		b->format = format;

		if (b->next)
			b->next->offset = 0;

		return;
	}

	b = malloc(sizeof(*b));
	if (b) {
		b->next = bo->binding.next;
		b->format = format;
		b->offset = offset;
		bo->binding.next = b;
	}
}


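/* KolibriOS-specific: query the framebuffer parameters via SRV_FBINFO and
 * wrap them in a scanout bo using the magic handle -2 that is special-cased
 * in kgem_add_reloc().
 */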
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb)
{
	struct kgem_bo *bo;
	size_t size;
	int ret;

	ret = drmIoctl(kgem->fd, SRV_FBINFO, fb);
	if (ret != 0)
		return 0;

	size = fb->pitch * fb->height / PAGE_SIZE;

	bo = __kgem_bo_alloc(-2, size);
	if (!bo) {
		return 0;
	}

	bo->domain    = DOMAIN_GTT;
	bo->unique_id = kgem_get_unique_id(kgem);
	bo->pitch     = fb->pitch;
	bo->tiling    = I915_TILING_NONE;
	bo->scanout   = 1;
	fb->fb_bo     = bo;

	printf("fb width %d height %d pitch %d bo %p\n",
	       fb->width, fb->height, fb->pitch, fb->fb_bo);

	return 1;
}

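/* Destroy any bos remaining in the pinned-batch pools. */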
void kgem_close_batches(struct kgem *kgem)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
		while (!list_is_empty(&kgem->pinned_batches[n])) {
			kgem_bo_destroy(kgem,
					list_first_entry(&kgem->pinned_batches[n],
							 struct kgem_bo, list));
		}
	}
}
