Subversion Repositories Kolibri OS


Rev 3256 (old)   Rev 3258 (new)
Line 113... Line 113...
113
 
113
 
114
struct local_i915_gem_cacheing {
114
struct local_i915_gem_cacheing {
115
	uint32_t handle;
115
	uint32_t handle;
116
	uint32_t cacheing;
116
	uint32_t cacheing;
-
 
117
};
-
 
118
 
-
 
119
#define LOCAL_IOCTL_I915_GEM_SET_CACHEING SRV_I915_GEM_SET_CACHEING
-
 
120
 
-
 
121
struct kgem_buffer {
-
 
122
	struct kgem_bo base;
-
 
123
	void *mem;
-
 
124
	uint32_t used;
-
 
125
	uint32_t need_io : 1;
-
 
126
	uint32_t write : 2;
-
 
127
	uint32_t mmapped : 1;
-
 
128
};
117
};
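
A gloss of the kgem_buffer struct that rev 3258 adds above, based on how its fields are used later in this diff (kgem_finish_buffers, kgem_retire__buffers); the per-field comments are interpretation, not text from either revision:

    struct kgem_buffer {
    	struct kgem_bo base;    /* the GEM object backing this upload buffer */
    	void *mem;              /* CPU pointer callers write their data into */
    	uint32_t used;          /* bytes of mem consumed so far */
    	uint32_t need_io : 1;   /* mem is a shadow copy; pwrite it into base at submit */
    	uint32_t write : 2;     /* write-intent flags for the buffered range */
    	uint32_t mmapped : 1;   /* mem maps base directly, so no copy is needed */
    };
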
129
 
118
static struct kgem_bo *__kgem_freed_bo;
130
static struct kgem_bo *__kgem_freed_bo;
-
 
131
static struct kgem_request *__kgem_freed_request;
-
 
132
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;
-
 
133
 
-
 
134
static inline int bytes(struct kgem_bo *bo)
-
 
135
{
-
 
136
	return __kgem_bo_size(bo);
Line 119... Line 137...
119
static struct kgem_request *__kgem_freed_request;
137
}
120
 
138
 
Line 121... Line 139...
121
#define bucket(B) (B)->size.pages.bucket
139
#define bucket(B) (B)->size.pages.bucket
Line 134... Line 152...
134
#else
152
#else
135
#define debug_alloc(k, b)
153
#define debug_alloc(k, b)
136
#define debug_alloc__bo(k, b)
154
#define debug_alloc__bo(k, b)
137
#endif
155
#endif
Line -... Line 156...
-
 
156
 
-
 
157
static void kgem_sna_reset(struct kgem *kgem)
-
 
158
{
-
 
159
	struct sna *sna = container_of(kgem, struct sna, kgem);
-
 
160
 
-
 
161
	sna->render.reset(sna);
-
 
162
	sna->blt_state.fill_bo = 0;
-
 
163
}
-
 
164
 
-
 
165
static void kgem_sna_flush(struct kgem *kgem)
-
 
166
{
-
 
167
	struct sna *sna = container_of(kgem, struct sna, kgem);
-
 
168
 
-
 
169
	sna->render.flush(sna);
-
 
170
 
-
 
171
//	if (sna->render.solid_cache.dirty)
-
 
172
//		sna_render_flush_solid(sna);
-
 
173
}
138
 
174
 
139
static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
175
static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
140
{
176
{
141
	struct drm_i915_gem_set_tiling set_tiling;
177
	struct drm_i915_gem_set_tiling set_tiling;
Line 157... Line 193...
157
}
193
}
Line 158... Line 194...
158
 
194
 
159
static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
195
static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
160
{
196
{
161
	struct local_i915_gem_cacheing arg;
-
 
Line 162... Line 197...
162
    ioctl_t  io;
197
	struct local_i915_gem_cacheing arg;
163
 
198
 
164
	VG_CLEAR(arg);
199
	VG_CLEAR(arg);
-
 
200
	arg.handle = handle;
-
 
201
	arg.cacheing = cacheing;
-
 
202
	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
Line 165... Line -...
165
	arg.handle = handle;
-
 
166
	arg.cacheing = cacheing;
-
 
167
	
-
 
168
    io.handle   = fd;
-
 
169
    io.io_code  = SRV_I915_GEM_SET_CACHEING;
-
 
170
    io.input    = &arg;
-
 
Line 171... Line -...
171
    io.inp_size = sizeof(arg);
-
 
Line 172... Line -...
172
    io.output   = NULL;
-
 
Line 173... Line 203...
173
    io.out_size = 0;
203
}
174
 
204
	
175
	return call_service(&io) == 0;
205
	
176
	
206
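
The hunk above shows the pattern repeated throughout this revision: rev 3256 filled in a KolibriOS ioctl_t request by hand and dispatched it through call_service(), while rev 3258 collapses each such site (SET_CACHEING here, and CREATE, GEM_CLOSE, GETPARAM, PIN and GET_APERTURE further down) into a single drmIoctl() call, with LOCAL_IOCTL_I915_GEM_SET_CACHEING defined above as the SRV_* service code itself. The sketch below shows how such a wrapper could sit on top of the old path; the ioctl_t layout is reconstructed from the fields rev 3256 assigns, and the wrapper name and explicit size parameter are illustrative assumptions, not the project's actual drmIoctl() implementation.

    /* Sketch only: field layout inferred from the assignments in rev 3256;
     * the real ioctl_t and call_service() come from the KolibriOS headers. */
    typedef struct
    {
        int       handle;    /* DRM file descriptor */
        uint32_t  io_code;   /* SRV_* service code, e.g. SRV_I915_GEM_SET_CACHEING */
        void     *input;     /* ioctl argument block */
        int       inp_size;  /* size of the argument block in bytes */
        void     *output;    /* unused by the calls in this file */
        int       out_size;
    } ioctl_t_sketch;

    /* Hypothetical wrapper (not the project's drmIoctl): it performs exactly
     * the boilerplate rev 3256 repeated at every call site, which is why each
     * site shrinks to one drmIoctl() line in rev 3258. */
    static int drmIoctl_sketch(int fd, uint32_t io_code, void *arg, int arg_size)
    {
        ioctl_t_sketch io;

        io.handle   = fd;
        io.io_code  = io_code;
        io.input    = arg;
        io.inp_size = arg_size;
        io.output   = NULL;
        io.out_size = 0;

        return call_service(&io);
    }

    /* In these terms, gem_set_cacheing() in rev 3258 is equivalent to:
     *   return drmIoctl_sketch(fd, SRV_I915_GEM_SET_CACHEING,
     *                          &arg, sizeof(arg)) == 0;
     */
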
 
Line 186... Line 216...
186
	if (!kgem->need_retire) {
216
	if (!kgem->need_retire) {
187
		DBG(("%s: nothing to retire\n", __FUNCTION__));
217
		DBG(("%s: nothing to retire\n", __FUNCTION__));
188
		return false;
218
		return false;
189
	}
219
	}
Line 190... Line 220...
190
 
220
 
191
//	if (kgem_retire(kgem))
221
	if (kgem_retire(kgem))
Line 192... Line 222...
192
//		return true;
222
		return true;
193
 
223
 
194
	if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
224
	if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
195
		DBG(("%s: not throttling\n", __FUNCTION__));
225
		DBG(("%s: not throttling\n", __FUNCTION__));
Line 196... Line 226...
196
		return false;
226
		return false;
197
	}
227
	}
-
 
228
 
-
 
229
	kgem_throttle(kgem);
-
 
230
	return kgem_retire(kgem);
-
 
231
}
-
 
232
 
-
 
233
static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
-
 
234
{
-
 
235
	struct drm_i915_gem_mmap_gtt mmap_arg;
-
 
236
	void *ptr;
-
 
237
 
-
 
238
	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
-
 
239
	     bo->handle, bytes(bo)));
-
 
240
	assert(bo->proxy == NULL);
-
 
241
 
-
 
242
retry_gtt:
-
 
243
	VG_CLEAR(mmap_arg);
-
 
244
	mmap_arg.handle = bo->handle;
-
 
245
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
-
 
246
		printf("%s: failed to retrieve GTT offset for handle=%d: %d\n",
-
 
247
		       __FUNCTION__, bo->handle, 0);
-
 
248
		(void)__kgem_throttle_retire(kgem, 0);
-
 
249
		if (kgem_expire_cache(kgem))
-
 
250
			goto retry_gtt;
-
 
251
 
-
 
252
		if (kgem->need_expire) {
-
 
253
			kgem_cleanup_cache(kgem);
198
 
254
			goto retry_gtt;
-
 
255
		}
-
 
256
 
-
 
257
		return NULL;
-
 
258
	}
-
 
259
 
-
 
260
retry_mmap:
-
 
261
//	ptr = mmap(0, bytes(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
-
 
262
//		   kgem->fd, mmap_arg.offset);
-
 
263
	if (ptr == 0) {
-
 
264
		printf("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
-
 
265
		       __FUNCTION__, bo->handle, bytes(bo), 0);
-
 
266
		if (__kgem_throttle_retire(kgem, 0))
-
 
267
			goto retry_mmap;
-
 
268
 
-
 
269
		if (kgem->need_expire) {
-
 
270
			kgem_cleanup_cache(kgem);
-
 
271
			goto retry_mmap;
-
 
272
		}
-
 
273
 
-
 
274
		ptr = NULL;
-
 
275
	}
-
 
276
 
-
 
277
	return ptr;
-
 
278
}
-
 
279
 
-
 
280
static int __gem_write(int fd, uint32_t handle,
-
 
281
		       int offset, int length,
Line -... Line 282...
-
 
282
		       const void *src)
-
 
283
{
-
 
284
	struct drm_i915_gem_pwrite pwrite;
-
 
285
 
-
 
286
	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
-
 
287
	     handle, offset, length));
-
 
288
 
-
 
289
	VG_CLEAR(pwrite);
-
 
290
	pwrite.handle = handle;
199
//	kgem_throttle(kgem);
291
	pwrite.offset = offset;
Line 200... Line 292...
200
//	return kgem_retire(kgem);
292
	pwrite.size = length;
201
		return false;
293
	pwrite.data_ptr = (uintptr_t)src;
202
 
294
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
Line 221... Line 313...
221
	} else {
313
	} else {
222
		pwrite.offset = offset;
314
		pwrite.offset = offset;
223
		pwrite.size = length;
315
		pwrite.size = length;
224
		pwrite.data_ptr = (uintptr_t)src;
316
		pwrite.data_ptr = (uintptr_t)src;
225
	}
317
	}
226
//	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
318
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
-
 
319
}
-
 
320
	
-
 
321
 
-
 
322
bool __kgem_busy(struct kgem *kgem, int handle)
-
 
323
{
-
 
324
	struct drm_i915_gem_busy busy;
-
 
325
    
-
 
326
	VG_CLEAR(busy);
-
 
327
	busy.handle = handle;
-
 
328
	busy.busy = !kgem->wedged;
-
 
329
	(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
-
 
330
	DBG(("%s: handle=%d, busy=%d, wedged=%d\n",
-
 
331
	     __FUNCTION__, handle, busy.busy, kgem->wedged));
-
 
332
 
227
    return -1;
333
	return busy.busy;
228
}
334
}
Line -... Line 335...
-
 
335
 
-
 
336
static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
-
 
337
{
-
 
338
	DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
-
 
339
	     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
-
 
340
	     __kgem_busy(kgem, bo->handle)));
-
 
341
	assert(bo->exec == NULL);
-
 
342
	assert(list_is_empty(&bo->vma));
-
 
343
 
-
 
344
	if (bo->rq) {
-
 
345
		if (!__kgem_busy(kgem, bo->handle)) {
-
 
346
			__kgem_bo_clear_busy(bo);
-
 
347
			kgem_retire(kgem);
-
 
348
		}
-
 
349
	} else {
-
 
350
		assert(!bo->needs_flush);
-
 
351
		ASSERT_IDLE(kgem, bo->handle);
-
 
352
	}
Line 229... Line 353...
229
 
353
}
230
 
354
 
231
bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
355
bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
232
		   const void *data, int length)
356
		   const void *data, int length)
Line 240... Line 364...
240
	if (gem_write(kgem->fd, bo->handle, 0, length, data))
364
	if (gem_write(kgem->fd, bo->handle, 0, length, data))
241
		return false;
365
		return false;
Line 242... Line 366...
242
 
366
 
243
	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
367
	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
244
	if (bo->exec == NULL) {
368
	if (bo->exec == NULL) {
245
//		kgem_bo_retire(kgem, bo);
369
		kgem_bo_retire(kgem, bo);
246
		bo->domain = DOMAIN_NONE;
370
		bo->domain = DOMAIN_NONE;
247
	}
371
	}
248
	return true;
372
	return true;
Line 249... Line 373...
249
}
373
}
250
 
374
 
251
static uint32_t gem_create(int fd, int num_pages)
375
static uint32_t gem_create(int fd, int num_pages)
252
{
-
 
Line 253... Line 376...
253
	struct drm_i915_gem_create create;
376
{
254
    ioctl_t  io;
377
	struct drm_i915_gem_create create;
255
 
378
 
256
	VG_CLEAR(create);
-
 
257
	create.handle = 0;
-
 
258
	create.size = PAGE_SIZE * num_pages;
379
	VG_CLEAR(create);
259
	
-
 
260
    io.handle   = fd;
-
 
261
    io.io_code  = SRV_I915_GEM_CREATE;
-
 
262
    io.input    = &create;
-
 
263
    io.inp_size = sizeof(create);
-
 
264
    io.output   = NULL;
-
 
265
    io.out_size = 0;
-
 
Line 266... Line 380...
266
 
380
	create.handle = 0;
267
    if (call_service(&io)!=0)
381
	create.size = PAGE_SIZE * num_pages;
Line 268... Line 382...
268
        return 0;
382
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
Line 339... Line 453...
339
}
453
}
Line 340... Line 454...
340
 
454
 
341
static void gem_close(int fd, uint32_t handle)
455
static void gem_close(int fd, uint32_t handle)
342
{
456
{
343
	struct drm_gem_close close;
-
 
Line 344... Line 457...
344
    ioctl_t  io;
457
	struct drm_gem_close close;
345
 
458
 
346
	VG_CLEAR(close);
-
 
347
	close.handle = handle;
-
 
348
 
459
	VG_CLEAR(close);
349
    io.handle   = fd;
-
 
350
    io.io_code  = SRV_DRM_GEM_CLOSE;
-
 
351
    io.input    = &close;
-
 
352
    io.inp_size = sizeof(close);
-
 
353
    io.output   = NULL;
-
 
354
    io.out_size = 0;
-
 
355
 
460
	close.handle = handle;
Line 356... Line 461...
356
    call_service(&io);
461
	(void)drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
357
}
462
}
358
 
463
 
Line 478... Line 583...
478
        : "memory");
583
        : "memory");
Line 479... Line 584...
479
    
584
    
480
    return size != -1 ? size : 0;
585
    return size != -1 ? size : 0;
Line 481... Line -...
481
}
-
 
482
 
586
}
483
 
587
 
484
static int gem_param(struct kgem *kgem, int name)
-
 
485
{
-
 
486
    ioctl_t  io;
588
static int gem_param(struct kgem *kgem, int name)
487
 
589
{
Line 488... Line 590...
488
    drm_i915_getparam_t gp;
590
    drm_i915_getparam_t gp;
489
    int v = -1; /* No param uses the sign bit, reserve it for errors */
591
    int v = -1; /* No param uses the sign bit, reserve it for errors */
490
 
592
 
491
    VG_CLEAR(gp);
-
 
492
    gp.param = name;
-
 
493
    gp.value = &v;
593
    VG_CLEAR(gp);
494
 
-
 
495
    io.handle   = kgem->fd;
-
 
496
    io.io_code  = SRV_GET_PARAM;
-
 
497
    io.input    = &gp;
-
 
498
    io.inp_size = sizeof(gp);
-
 
499
    io.output   = NULL;
-
 
500
    io.out_size = 0;
594
    gp.param = name;
Line 501... Line 595...
501
 
595
    gp.value = &v;
502
    if (call_service(&io)!=0)
596
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp))
503
        return -1;
597
        return -1;
Line 670... Line 764...
670
}
764
}
Line 671... Line 765...
671
 
765
 
672
 
766
 
673
static bool kgem_init_pinned_batches(struct kgem *kgem)
-
 
674
{
-
 
675
	ioctl_t  io;
767
static bool kgem_init_pinned_batches(struct kgem *kgem)
676
 
768
{
677
	int count[2] = { 4, 2 };
769
	int count[2] = { 4, 2 };
Line 678... Line 770...
678
	int size[2] = { 1, 4 };
770
	int size[2] = { 1, 4 };
Line 700... Line 792...
700
				gem_close(kgem->fd, pin.handle);
792
				gem_close(kgem->fd, pin.handle);
701
				goto err;
793
				goto err;
702
			}
794
			}
Line 703... Line 795...
703
 
795
 
704
			pin.alignment = 0;
-
 
705
			
-
 
706
            io.handle   = kgem->fd;
796
			pin.alignment = 0;
707
            io.io_code  = SRV_I915_GEM_PIN;
-
 
708
            io.input    = &pin;
-
 
709
            io.inp_size = sizeof(pin);
-
 
710
            io.output   = NULL;
-
 
711
            io.out_size = 0;
-
 
712
 
-
 
713
            if (call_service(&io)!=0){
797
			if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
714
				gem_close(kgem->fd, pin.handle);
798
				gem_close(kgem->fd, pin.handle);
715
				goto err;
799
				goto err;
716
			}
800
			}
717
			bo->presumed_offset = pin.offset;
801
			bo->presumed_offset = pin.offset;
Line 756... Line 840...
756
{
840
{
757
    struct drm_i915_gem_get_aperture aperture;
841
    struct drm_i915_gem_get_aperture aperture;
758
    size_t totalram;
842
    size_t totalram;
759
    unsigned half_gpu_max;
843
    unsigned half_gpu_max;
760
    unsigned int i, j;
844
    unsigned int i, j;
761
    ioctl_t   io;
-
 
Line 762... Line 845...
762
 
845
 
Line 763... Line 846...
763
    DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));
846
    DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));
Line 884... Line 967...
884
         !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
967
         !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
885
         kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));
968
         kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));
Line 886... Line 969...
886
 
969
 
887
    VG_CLEAR(aperture);
970
    VG_CLEAR(aperture);
888
    aperture.aper_size = 0;
-
 
889
    
-
 
890
    io.handle   = fd;
971
    aperture.aper_size = 0;
891
    io.io_code  = SRV_I915_GEM_GET_APERTURE;
-
 
892
    io.input    = &aperture;
-
 
893
    io.inp_size = sizeof(aperture);
-
 
894
    io.output   = NULL;
-
 
895
    io.out_size = 0;
-
 
896
 
-
 
897
    (void)call_service(&io);
-
 
898
 
972
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
899
    if (aperture.aper_size == 0)
973
    if (aperture.aper_size == 0)
Line 900... Line 974...
900
        aperture.aper_size = 64*1024*1024;
974
        aperture.aper_size = 64*1024*1024;
901
 
975
 
Line 1000... Line 1074...
1000
    if (kgem->has_pinned_batches)
1074
    if (kgem->has_pinned_batches)
1001
        kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;
1075
        kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;
Line 1002... Line 1076...
1002
 
1076
 
Line -... Line 1077...
-
 
1077
}
-
 
1078
 
-
 
1079
static struct drm_i915_gem_exec_object2 *
-
 
1080
kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
-
 
1081
{
-
 
1082
	struct drm_i915_gem_exec_object2 *exec;
-
 
1083
 
-
 
1084
	DBG(("%s: handle=%d, index=%d\n",
-
 
1085
	     __FUNCTION__, bo->handle, kgem->nexec));
-
 
1086
 
-
 
1087
	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
-
 
1088
	bo->target_handle = kgem->has_handle_lut ? kgem->nexec : bo->handle;
-
 
1089
	exec = memset(&kgem->exec[kgem->nexec++], 0, sizeof(*exec));
-
 
1090
	exec->handle = bo->handle;
-
 
1091
	exec->offset = bo->presumed_offset;
-
 
1092
 
-
 
1093
	kgem->aperture += num_pages(bo);
-
 
1094
 
-
 
1095
	return exec;
-
 
1096
}
-
 
1097
 
-
 
1098
static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
-
 
1099
{
-
 
1100
	bo->exec = kgem_add_handle(kgem, bo);
-
 
1101
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);
-
 
1102
 
-
 
1103
	list_move_tail(&bo->request, &kgem->next_request->buffers);
-
 
1104
 
-
 
1105
	/* XXX is it worth working around gcc here? */
-
 
1106
	kgem->flush |= bo->flush;
-
 
1107
}
-
 
1108
 
-
 
1109
static uint32_t kgem_end_batch(struct kgem *kgem)
-
 
1110
{
-
 
1111
	kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
-
 
1112
	if (kgem->nbatch & 1)
-
 
1113
		kgem->batch[kgem->nbatch++] = MI_NOOP;
-
 
1114
 
-
 
1115
	return kgem->nbatch;
-
 
1116
}
-
 
1117
 
-
 
1118
static void kgem_fixup_self_relocs(struct kgem *kgem, struct kgem_bo *bo)
-
 
1119
{
-
 
1120
	int n;
-
 
1121
 
-
 
1122
	if (kgem->nreloc__self == 0)
-
 
1123
		return;
-
 
1124
 
-
 
1125
	for (n = 0; n < kgem->nreloc__self; n++) {
-
 
1126
		int i = kgem->reloc__self[n];
-
 
1127
		assert(kgem->reloc[i].target_handle == ~0U);
-
 
1128
		kgem->reloc[i].target_handle = bo->target_handle;
-
 
1129
		kgem->reloc[i].presumed_offset = bo->presumed_offset;
-
 
1130
		kgem->batch[kgem->reloc[i].offset/sizeof(kgem->batch[0])] =
-
 
1131
			kgem->reloc[i].delta + bo->presumed_offset;
-
 
1132
	}
-
 
1133
 
-
 
1134
	if (n == 256) {
-
 
1135
		for (n = kgem->reloc__self[255]; n < kgem->nreloc; n++) {
-
 
1136
			if (kgem->reloc[n].target_handle == ~0U) {
-
 
1137
				kgem->reloc[n].target_handle = bo->target_handle;
-
 
1138
				kgem->reloc[n].presumed_offset = bo->presumed_offset;
-
 
1139
				kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
-
 
1140
					kgem->reloc[n].delta + bo->presumed_offset;
-
 
1141
			}
-
 
1142
		}
-
 
1143
 
-
 
1144
	}
-
 
1145
 
-
 
1146
}
-
 
1147
 
-
 
1148
static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo)
-
 
1149
{
-
 
1150
	struct kgem_bo_binding *b;
-
 
1151
 
-
 
1152
	b = bo->binding.next;
-
 
1153
	while (b) {
-
 
1154
		struct kgem_bo_binding *next = b->next;
-
 
1155
		free (b);
-
 
1156
		b = next;
-
 
1157
	}
-
 
1158
}
-
 
1159
 
-
 
1160
static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
-
 
1161
{
-
 
1162
	int type = IS_CPU_MAP(bo->map);
-
 
1163
 
-
 
1164
	assert(!IS_USER_MAP(bo->map));
-
 
1165
 
-
 
1166
	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
-
 
1167
	     __FUNCTION__, type ? "CPU" : "GTT",
-
 
1168
	     bo->handle, kgem->vma[type].count));
-
 
1169
 
-
 
1170
	VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
-
 
1171
//	munmap(MAP(bo->map), bytes(bo));
-
 
1172
	bo->map = NULL;
-
 
1173
 
-
 
1174
	if (!list_is_empty(&bo->vma)) {
-
 
1175
		list_del(&bo->vma);
-
 
1176
		kgem->vma[type].count--;
-
 
1177
	}
-
 
1178
}
-
 
1179
 
-
 
1180
static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
-
 
1181
{
-
 
1182
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
-
 
1183
	assert(bo->refcnt == 0);
-
 
1184
	assert(bo->exec == NULL);
-
 
1185
	assert(!bo->snoop || bo->rq == NULL);
-
 
1186
 
-
 
1187
#ifdef DEBUG_MEMORY
-
 
1188
	kgem->debug_memory.bo_allocs--;
-
 
1189
	kgem->debug_memory.bo_bytes -= bytes(bo);
-
 
1190
#endif
-
 
1191
 
-
 
1192
	kgem_bo_binding_free(kgem, bo);
-
 
1193
 
-
 
1194
	if (IS_USER_MAP(bo->map)) {
-
 
1195
		assert(bo->rq == NULL);
-
 
1196
		assert(MAP(bo->map) != bo || bo->io);
-
 
1197
		if (bo != MAP(bo->map)) {
-
 
1198
			DBG(("%s: freeing snooped base\n", __FUNCTION__));
-
 
1199
			free(MAP(bo->map));
-
 
1200
		}
-
 
1201
		bo->map = NULL;
-
 
1202
	}
-
 
1203
	if (bo->map)
-
 
1204
		kgem_bo_release_map(kgem, bo);
-
 
1205
	assert(list_is_empty(&bo->vma));
-
 
1206
 
-
 
1207
	_list_del(&bo->list);
-
 
1208
	_list_del(&bo->request);
-
 
1209
	gem_close(kgem->fd, bo->handle);
-
 
1210
 
-
 
1211
	if (!bo->io) {
-
 
1212
		*(struct kgem_bo **)bo = __kgem_freed_bo;
-
 
1213
		__kgem_freed_bo = bo;
-
 
1214
	} else
-
 
1215
		free(bo);
-
 
1216
}
-
 
1217
 
-
 
1218
inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
-
 
1219
					    struct kgem_bo *bo)
-
 
1220
{
-
 
1221
	DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle));
-
 
1222
 
-
 
1223
	assert(bo->refcnt == 0);
-
 
1224
	assert(bo->reusable);
-
 
1225
	assert(bo->rq == NULL);
-
 
1226
	assert(bo->exec == NULL);
-
 
1227
	assert(bo->domain != DOMAIN_GPU);
-
 
1228
	assert(!bo->proxy);
-
 
1229
	assert(!bo->io);
-
 
1230
	assert(!bo->scanout);
-
 
1231
	assert(!bo->needs_flush);
-
 
1232
	assert(list_is_empty(&bo->vma));
-
 
1233
	ASSERT_IDLE(kgem, bo->handle);
-
 
1234
 
-
 
1235
	kgem->need_expire = true;
-
 
1236
 
-
 
1237
	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
-
 
1238
		list_move(&bo->list, &kgem->large_inactive);
-
 
1239
		return;
-
 
1240
	}
-
 
1241
 
-
 
1242
	assert(bo->flush == false);
-
 
1243
	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
-
 
1244
	if (bo->map) {
-
 
1245
		int type = IS_CPU_MAP(bo->map);
-
 
1246
		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
-
 
1247
		    (!type && !__kgem_bo_is_mappable(kgem, bo))) {
-
 
1248
//			munmap(MAP(bo->map), bytes(bo));
-
 
1249
			bo->map = NULL;
-
 
1250
		}
-
 
1251
		if (bo->map) {
-
 
1252
			list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
-
 
1253
			kgem->vma[type].count++;
-
 
1254
		}
-
 
1255
	}
-
 
1256
}
-
 
1257
 
-
 
1258
static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
-
 
1259
{
-
 
1260
	struct kgem_bo *base;
-
 
1261
 
-
 
1262
	if (!bo->io)
-
 
1263
		return bo;
-
 
1264
 
-
 
1265
	assert(!bo->snoop);
-
 
1266
	base = malloc(sizeof(*base));
-
 
1267
	if (base) {
-
 
1268
		DBG(("%s: transferring io handle=%d to bo\n",
-
 
1269
		     __FUNCTION__, bo->handle));
-
 
1270
		/* transfer the handle to a minimum bo */
-
 
1271
		memcpy(base, bo, sizeof(*base));
-
 
1272
		base->io = false;
-
 
1273
		list_init(&base->list);
-
 
1274
		list_replace(&bo->request, &base->request);
-
 
1275
		list_replace(&bo->vma, &base->vma);
-
 
1276
		free(bo);
-
 
1277
		bo = base;
-
 
1278
	} else
-
 
1279
		bo->reusable = false;
-
 
1280
 
Line 1003... Line 1281...
1003
}
1281
	return bo;
1004
 
1282
}
1005
 
1283
 
1006
inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
1284
inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
Line 1016... Line 1294...
1016
		list_del(&bo->vma);
1294
		list_del(&bo->vma);
1017
		kgem->vma[IS_CPU_MAP(bo->map)].count--;
1295
		kgem->vma[IS_CPU_MAP(bo->map)].count--;
1018
	}
1296
	}
1019
}
1297
}
Line -... Line 1298...
-
 
1298
 
-
 
1299
inline static void kgem_bo_remove_from_active(struct kgem *kgem,
-
 
1300
					      struct kgem_bo *bo)
-
 
1301
{
-
 
1302
	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));
-
 
1303
 
-
 
1304
	list_del(&bo->list);
-
 
1305
	assert(bo->rq != NULL);
-
 
1306
	if (bo->rq == (void *)kgem)
-
 
1307
		list_del(&bo->request);
-
 
1308
	assert(list_is_empty(&bo->vma));
-
 
1309
}
-
 
1310
 
-
 
1311
static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
-
 
1312
{
-
 
1313
	assert(bo->scanout);
-
 
1314
	assert(!bo->refcnt);
-
 
1315
	assert(bo->exec == NULL);
-
 
1316
	assert(bo->proxy == NULL);
-
 
1317
 
-
 
1318
	DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
-
 
1319
	     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
-
 
1320
	if (bo->delta) {
-
 
1321
		/* XXX will leak if we are not DRM_MASTER. *shrug* */
-
 
1322
//		drmModeRmFB(kgem->fd, bo->delta);
-
 
1323
		bo->delta = 0;
-
 
1324
	}
-
 
1325
 
-
 
1326
	bo->scanout = false;
-
 
1327
	bo->flush = false;
-
 
1328
	bo->reusable = true;
-
 
1329
 
-
 
1330
	if (kgem->has_llc &&
-
 
1331
	    !gem_set_cacheing(kgem->fd, bo->handle, SNOOPED))
-
 
1332
		bo->reusable = false;
-
 
1333
}
-
 
1334
 
-
 
1335
static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
-
 
1336
{
-
 
1337
	struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;
-
 
1338
 
-
 
1339
	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
-
 
1340
	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
-
 
1341
 
-
 
1342
	if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used)
-
 
1343
		io->used = bo->delta;
-
 
1344
}
-
 
1345
 
-
 
1346
static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
-
 
1347
{
-
 
1348
	assert(bo->refcnt == 0);
-
 
1349
	assert(bo->scanout);
-
 
1350
	assert(bo->delta);
-
 
1351
	assert(!bo->snoop);
-
 
1352
	assert(!bo->io);
-
 
1353
 
-
 
1354
	DBG(("%s: moving %d [fb %d] to scanout cache, active? %d\n",
-
 
1355
	     __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL));
-
 
1356
	if (bo->rq)
-
 
1357
		list_move_tail(&bo->list, &kgem->scanout);
-
 
1358
	else
-
 
1359
	list_move(&bo->list, &kgem->scanout);
-
 
1360
}
-
 
1361
 
-
 
1362
static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
-
 
1363
{
-
 
1364
	assert(bo->refcnt == 0);
-
 
1365
	assert(bo->exec == NULL);
-
 
1366
 
-
 
1367
	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
-
 
1368
		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
-
 
1369
		     __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13));
-
 
1370
		kgem_bo_free(kgem, bo);
-
 
1371
		return;
-
 
1372
	}
-
 
1373
 
-
 
1374
	assert(bo->tiling == I915_TILING_NONE);
-
 
1375
	assert(bo->rq == NULL);
-
 
1376
 
-
 
1377
	DBG(("%s: moving %d to snoop cachee\n", __FUNCTION__, bo->handle));
-
 
1378
	list_add(&bo->list, &kgem->snoop);
-
 
1379
}
-
 
1380
 
-
 
1381
static struct kgem_bo *
-
 
1382
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
-
 
1383
{
-
 
1384
	struct kgem_bo *bo, *first = NULL;
-
 
1385
 
-
 
1386
	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));
-
 
1387
 
-
 
1388
	if ((kgem->has_cacheing | kgem->has_userptr) == 0)
-
 
1389
		return NULL;
-
 
1390
 
-
 
1391
	if (list_is_empty(&kgem->snoop)) {
-
 
1392
		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
-
 
1393
		if (!__kgem_throttle_retire(kgem, flags)) {
-
 
1394
			DBG(("%s: nothing retired\n", __FUNCTION__));
-
 
1395
			return NULL;
-
 
1396
		}
-
 
1397
	}
-
 
1398
 
-
 
1399
	list_for_each_entry(bo, &kgem->snoop, list) {
-
 
1400
		assert(bo->refcnt == 0);
-
 
1401
		assert(bo->snoop);
-
 
1402
		assert(!bo->scanout);
-
 
1403
		assert(bo->proxy == NULL);
-
 
1404
		assert(bo->tiling == I915_TILING_NONE);
-
 
1405
		assert(bo->rq == NULL);
-
 
1406
		assert(bo->exec == NULL);
-
 
1407
 
-
 
1408
		if (num_pages > num_pages(bo))
-
 
1409
			continue;
-
 
1410
 
-
 
1411
		if (num_pages(bo) > 2*num_pages) {
-
 
1412
			if (first == NULL)
-
 
1413
				first = bo;
-
 
1414
			continue;
-
 
1415
		}
-
 
1416
 
-
 
1417
		list_del(&bo->list);
-
 
1418
		bo->pitch = 0;
-
 
1419
		bo->delta = 0;
-
 
1420
 
-
 
1421
		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
-
 
1422
		     __FUNCTION__, bo->handle, num_pages(bo)));
-
 
1423
		return bo;
-
 
1424
	}
-
 
1425
 
-
 
1426
	if (first) {
-
 
1427
		list_del(&first->list);
-
 
1428
		first->pitch = 0;
-
 
1429
		first->delta = 0;
-
 
1430
 
-
 
1431
		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
-
 
1432
		     __FUNCTION__, first->handle, num_pages(first)));
-
 
1433
		return first;
-
 
1434
	}
-
 
1435
 
-
 
1436
	return NULL;
-
 
1437
}
-
 
1438
 
-
 
1439
static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
-
 
1440
{
-
 
1441
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
-
 
1442
 
-
 
1443
	assert(list_is_empty(&bo->list));
-
 
1444
	assert(bo->refcnt == 0);
-
 
1445
	assert(!bo->purged);
-
 
1446
	assert(bo->proxy == NULL);
-
 
1447
 
-
 
1448
	bo->binding.offset = 0;
-
 
1449
 
-
 
1450
	if (DBG_NO_CACHE)
-
 
1451
		goto destroy;
-
 
1452
 
-
 
1453
	if (bo->snoop && !bo->flush) {
-
 
1454
		DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
-
 
1455
		assert(!bo->flush);
-
 
1456
		assert(list_is_empty(&bo->list));
-
 
1457
		if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle))
-
 
1458
			__kgem_bo_clear_busy(bo);
-
 
1459
		if (bo->rq == NULL) {
-
 
1460
			assert(!bo->needs_flush);
-
 
1461
			kgem_bo_move_to_snoop(kgem, bo);
-
 
1462
		}
-
 
1463
		return;
-
 
1464
	}
-
 
1465
 
-
 
1466
	if (bo->scanout) {
-
 
1467
		kgem_bo_move_to_scanout(kgem, bo);
-
 
1468
		return;
-
 
1469
	}
-
 
1470
 
-
 
1471
	if (bo->io)
-
 
1472
		bo = kgem_bo_replace_io(bo);
-
 
1473
	if (!bo->reusable) {
-
 
1474
		DBG(("%s: handle=%d, not reusable\n",
-
 
1475
		     __FUNCTION__, bo->handle));
-
 
1476
		goto destroy;
-
 
1477
	}
-
 
1478
 
-
 
1479
	if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU)
-
 
1480
		kgem_bo_release_map(kgem, bo);
-
 
1481
 
-
 
1482
	assert(list_is_empty(&bo->vma));
-
 
1483
	assert(list_is_empty(&bo->list));
-
 
1484
	assert(bo->snoop == false);
-
 
1485
	assert(bo->io == false);
-
 
1486
	assert(bo->scanout == false);
-
 
1487
 
-
 
1488
	if (bo->exec && kgem->nexec == 1) {
-
 
1489
		DBG(("%s: only handle in batch, discarding last operations\n",
-
 
1490
		     __FUNCTION__));
-
 
1491
		assert(bo->exec == &kgem->exec[0]);
-
 
1492
		assert(kgem->exec[0].handle == bo->handle);
-
 
1493
		assert(RQ(bo->rq) == kgem->next_request);
-
 
1494
		bo->refcnt = 1;
-
 
1495
		kgem_reset(kgem);
-
 
1496
		bo->refcnt = 0;
-
 
1497
	}
-
 
1498
 
-
 
1499
	if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
-
 
1500
		__kgem_bo_clear_busy(bo);
-
 
1501
 
-
 
1502
	if (bo->rq) {
-
 
1503
		struct list *cache;
-
 
1504
 
-
 
1505
		DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
-
 
1506
		if (bucket(bo) < NUM_CACHE_BUCKETS)
-
 
1507
			cache = &kgem->active[bucket(bo)][bo->tiling];
-
 
1508
		else
-
 
1509
			cache = &kgem->large;
-
 
1510
		list_add(&bo->list, cache);
-
 
1511
		return;
-
 
1512
	}
-
 
1513
 
-
 
1514
	assert(bo->exec == NULL);
-
 
1515
	assert(list_is_empty(&bo->request));
-
 
1516
 
-
 
1517
	if (!IS_CPU_MAP(bo->map)) {
-
 
1518
		if (!kgem_bo_set_purgeable(kgem, bo))
-
 
1519
			goto destroy;
-
 
1520
 
-
 
1521
		if (!kgem->has_llc && bo->domain == DOMAIN_CPU)
-
 
1522
			goto destroy;
-
 
1523
 
-
 
1524
		DBG(("%s: handle=%d, purged\n",
-
 
1525
		     __FUNCTION__, bo->handle));
-
 
1526
	}
-
 
1527
 
-
 
1528
	kgem_bo_move_to_inactive(kgem, bo);
-
 
1529
	return;
-
 
1530
 
-
 
1531
destroy:
-
 
1532
	if (!bo->exec)
-
 
1533
		kgem_bo_free(kgem, bo);
-
 
1534
}
-
 
1535
 
-
 
1536
static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
-
 
1537
{
-
 
1538
	assert(bo->refcnt);
-
 
1539
	if (--bo->refcnt == 0)
-
 
1540
		__kgem_bo_destroy(kgem, bo);
-
 
1541
}
-
 
1542
 
-
 
1543
static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
-
 
1544
{
-
 
1545
	while (!list_is_empty(&bo->base.vma)) {
-
 
1546
		struct kgem_bo *cached;
-
 
1547
 
-
 
1548
		cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
-
 
1549
		assert(cached->proxy == &bo->base);
-
 
1550
		list_del(&cached->vma);
-
 
1551
 
-
 
1552
		assert(*(struct kgem_bo **)cached->map == cached);
-
 
1553
		*(struct kgem_bo **)cached->map = NULL;
-
 
1554
		cached->map = NULL;
-
 
1555
 
-
 
1556
		kgem_bo_destroy(kgem, cached);
-
 
1557
	}
-
 
1558
}
-
 
1559
 
-
 
1560
static bool kgem_retire__buffers(struct kgem *kgem)
-
 
1561
{
-
 
1562
	bool retired = false;
-
 
1563
 
-
 
1564
	while (!list_is_empty(&kgem->active_buffers)) {
-
 
1565
		struct kgem_buffer *bo =
-
 
1566
			list_last_entry(&kgem->active_buffers,
-
 
1567
					struct kgem_buffer,
-
 
1568
					base.list);
-
 
1569
 
-
 
1570
		if (bo->base.rq)
-
 
1571
			break;
-
 
1572
 
-
 
1573
		DBG(("%s: releasing upload cache for handle=%d? %d\n",
-
 
1574
		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
-
 
1575
		list_del(&bo->base.list);
-
 
1576
		kgem_buffer_release(kgem, bo);
-
 
1577
		kgem_bo_unref(kgem, &bo->base);
-
 
1578
		retired = true;
-
 
1579
	}
-
 
1580
 
-
 
1581
	return retired;
-
 
1582
}
-
 
1583
 
-
 
1584
static bool kgem_retire__flushing(struct kgem *kgem)
-
 
1585
{
-
 
1586
	struct kgem_bo *bo, *next;
-
 
1587
	bool retired = false;
-
 
1588
 
-
 
1589
	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
-
 
1590
		assert(bo->rq == (void *)kgem);
-
 
1591
		assert(bo->exec == NULL);
-
 
1592
 
-
 
1593
		if (__kgem_busy(kgem, bo->handle))
-
 
1594
			break;
-
 
1595
 
-
 
1596
		__kgem_bo_clear_busy(bo);
-
 
1597
 
-
 
1598
		if (bo->refcnt)
-
 
1599
			continue;
-
 
1600
 
-
 
1601
		if (bo->snoop) {
-
 
1602
			kgem_bo_move_to_snoop(kgem, bo);
-
 
1603
		} else if (bo->scanout) {
-
 
1604
			kgem_bo_move_to_scanout(kgem, bo);
-
 
1605
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
-
 
1606
			   kgem_bo_set_purgeable(kgem, bo)) {
-
 
1607
			kgem_bo_move_to_inactive(kgem, bo);
-
 
1608
			retired = true;
-
 
1609
		} else
-
 
1610
			kgem_bo_free(kgem, bo);
-
 
1611
	}
-
 
1612
#if HAS_DEBUG_FULL
-
 
1613
	{
-
 
1614
		int count = 0;
-
 
1615
		list_for_each_entry(bo, &kgem->flushing, request)
-
 
1616
			count++;
-
 
1617
		printf("%s: %d bo on flushing list\n", __FUNCTION__, count);
-
 
1618
	}
-
 
1619
#endif
-
 
1620
 
-
 
1621
	kgem->need_retire |= !list_is_empty(&kgem->flushing);
-
 
1622
 
-
 
1623
	return retired;
-
 
1624
}
-
 
1625
 
-
 
1626
 
-
 
1627
static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
-
 
1628
{
-
 
1629
	bool retired = false;
-
 
1630
 
-
 
1631
	DBG(("%s: request %d complete\n",
-
 
1632
	     __FUNCTION__, rq->bo->handle));
-
 
1633
 
-
 
1634
	while (!list_is_empty(&rq->buffers)) {
-
 
1635
		struct kgem_bo *bo;
-
 
1636
 
-
 
1637
		bo = list_first_entry(&rq->buffers,
-
 
1638
				      struct kgem_bo,
-
 
1639
				      request);
-
 
1640
 
-
 
1641
		assert(RQ(bo->rq) == rq);
-
 
1642
		assert(bo->exec == NULL);
-
 
1643
		assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);
-
 
1644
 
-
 
1645
		list_del(&bo->request);
-
 
1646
 
-
 
1647
		if (bo->needs_flush)
-
 
1648
			bo->needs_flush = __kgem_busy(kgem, bo->handle);
-
 
1649
		if (bo->needs_flush) {
-
 
1650
			DBG(("%s: moving %d to flushing\n",
-
 
1651
			     __FUNCTION__, bo->handle));
-
 
1652
			list_add(&bo->request, &kgem->flushing);
-
 
1653
			bo->rq = (void *)kgem;
-
 
1654
			continue;
-
 
1655
		}
-
 
1656
 
-
 
1657
		bo->domain = DOMAIN_NONE;
-
 
1658
		bo->rq = NULL;
-
 
1659
		if (bo->refcnt)
-
 
1660
			continue;
-
 
1661
 
-
 
1662
		if (bo->snoop) {
-
 
1663
			kgem_bo_move_to_snoop(kgem, bo);
-
 
1664
		} else if (bo->scanout) {
-
 
1665
			kgem_bo_move_to_scanout(kgem, bo);
-
 
1666
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
-
 
1667
			   kgem_bo_set_purgeable(kgem, bo)) {
-
 
1668
			kgem_bo_move_to_inactive(kgem, bo);
-
 
1669
			retired = true;
-
 
1670
		} else {
-
 
1671
			DBG(("%s: closing %d\n",
-
 
1672
			     __FUNCTION__, bo->handle));
-
 
1673
			kgem_bo_free(kgem, bo);
-
 
1674
		}
-
 
1675
	}
-
 
1676
 
-
 
1677
	assert(rq->bo->rq == NULL);
-
 
1678
	assert(list_is_empty(&rq->bo->request));
-
 
1679
 
-
 
1680
	if (--rq->bo->refcnt == 0) {
-
 
1681
		if (kgem_bo_set_purgeable(kgem, rq->bo)) {
-
 
1682
			kgem_bo_move_to_inactive(kgem, rq->bo);
-
 
1683
			retired = true;
-
 
1684
		} else {
-
 
1685
			DBG(("%s: closing %d\n",
-
 
1686
			     __FUNCTION__, rq->bo->handle));
-
 
1687
			kgem_bo_free(kgem, rq->bo);
-
 
1688
		}
-
 
1689
	}
-
 
1690
 
-
 
1691
	__kgem_request_free(rq);
-
 
1692
	return retired;
-
 
1693
}
-
 
1694
 
-
 
1695
static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
-
 
1696
{
-
 
1697
	bool retired = false;
-
 
1698
 
-
 
1699
	while (!list_is_empty(&kgem->requests[ring])) {
-
 
1700
		struct kgem_request *rq;
-
 
1701
 
-
 
1702
		rq = list_first_entry(&kgem->requests[ring],
-
 
1703
				      struct kgem_request,
-
 
1704
				      list);
-
 
1705
		if (__kgem_busy(kgem, rq->bo->handle))
-
 
1706
			break;
-
 
1707
 
-
 
1708
		retired |= __kgem_retire_rq(kgem, rq);
-
 
1709
	}
-
 
1710
 
-
 
1711
#if HAS_DEBUG_FULL
-
 
1712
	{
-
 
1713
		struct kgem_bo *bo;
-
 
1714
		int count = 0;
-
 
1715
 
-
 
1716
		list_for_each_entry(bo, &kgem->requests[ring], request)
-
 
1717
			count++;
-
 
1718
 
-
 
1719
		bo = NULL;
-
 
1720
		if (!list_is_empty(&kgem->requests[ring]))
-
 
1721
			bo = list_first_entry(&kgem->requests[ring],
-
 
1722
					      struct kgem_request,
-
 
1723
					      list)->bo;
-
 
1724
 
-
 
1725
		printf("%s: ring=%d, %d outstanding requests, oldest=%d\n",
-
 
1726
		       __FUNCTION__, ring, count, bo ? bo->handle : 0);
-
 
1727
	}
-
 
1728
#endif
-
 
1729
 
-
 
1730
	return retired;
-
 
1731
}
-
 
1732
 
-
 
1733
static bool kgem_retire__requests(struct kgem *kgem)
-
 
1734
{
-
 
1735
	bool retired = false;
-
 
1736
	int n;
-
 
1737
 
-
 
1738
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
-
 
1739
		retired |= kgem_retire__requests_ring(kgem, n);
-
 
1740
		kgem->need_retire |= !list_is_empty(&kgem->requests[n]);
-
 
1741
	}
-
 
1742
 
-
 
1743
	return retired;
-
 
1744
}
-
 
1745
 
-
 
1746
bool kgem_retire(struct kgem *kgem)
-
 
1747
{
-
 
1748
	bool retired = false;
-
 
1749
 
-
 
1750
	DBG(("%s\n", __FUNCTION__));
-
 
1751
 
-
 
1752
	kgem->need_retire = false;
-
 
1753
 
-
 
1754
	retired |= kgem_retire__flushing(kgem);
-
 
1755
	retired |= kgem_retire__requests(kgem);
-
 
1756
	retired |= kgem_retire__buffers(kgem);
-
 
1757
 
-
 
1758
	DBG(("%s -- retired=%d, need_retire=%d\n",
-
 
1759
	     __FUNCTION__, retired, kgem->need_retire));
-
 
1760
 
-
 
1761
	kgem->retire(kgem);
-
 
1762
 
-
 
1763
	return retired;
-
 
1764
}
-
 
1765
 
-
 
1766
 
-
 
1767
 
-
 
1768
 
-
 
1769
 
-
 
1770
 
-
 
1771
 
-
 
1772
static void kgem_commit(struct kgem *kgem)
-
 
1773
{
-
 
1774
	struct kgem_request *rq = kgem->next_request;
-
 
1775
	struct kgem_bo *bo, *next;
-
 
1776
 
-
 
1777
	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
-
 
1778
		assert(next->request.prev == &bo->request);
-
 
1779
 
-
 
1780
		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n",
-
 
1781
		     __FUNCTION__, bo->handle, bo->proxy != NULL,
-
 
1782
		     bo->dirty, bo->needs_flush, bo->snoop,
-
 
1783
		     (unsigned)bo->exec->offset));
-
 
1784
 
-
 
1785
		assert(!bo->purged);
-
 
1786
		assert(bo->exec);
-
 
1787
		assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec);
-
 
1788
		assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));
-
 
1789
 
-
 
1790
		bo->presumed_offset = bo->exec->offset;
-
 
1791
		bo->exec = NULL;
-
 
1792
		bo->target_handle = -1;
-
 
1793
 
-
 
1794
		if (!bo->refcnt && !bo->reusable) {
-
 
1795
			assert(!bo->snoop);
-
 
1796
			kgem_bo_free(kgem, bo);
-
 
1797
			continue;
-
 
1798
		}
-
 
1799
 
-
 
1800
		bo->binding.offset = 0;
-
 
1801
		bo->domain = DOMAIN_GPU;
-
 
1802
		bo->dirty = false;
-
 
1803
 
-
 
1804
		if (bo->proxy) {
-
 
1805
			/* proxies are not used for domain tracking */
-
 
1806
			bo->exec = NULL;
-
 
1807
			__kgem_bo_clear_busy(bo);
-
 
1808
		}
-
 
1809
 
-
 
1810
		kgem->scanout_busy |= bo->scanout;
-
 
1811
	}
-
 
1812
 
-
 
1813
	if (rq == &kgem->static_request) {
-
 
1814
		struct drm_i915_gem_set_domain set_domain;
-
 
1815
 
-
 
1816
		DBG(("%s: syncing due to allocation failure\n", __FUNCTION__));
-
 
1817
 
-
 
1818
		VG_CLEAR(set_domain);
-
 
1819
		set_domain.handle = rq->bo->handle;
-
 
1820
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-
 
1821
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
 
1822
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
-
 
1823
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
-
 
1824
			kgem_throttle(kgem);
-
 
1825
		}
-
 
1826
 
-
 
1827
		kgem_retire(kgem);
-
 
1828
		assert(list_is_empty(&rq->buffers));
-
 
1829
 
-
 
1830
		gem_close(kgem->fd, rq->bo->handle);
-
 
1831
		kgem_cleanup_cache(kgem);
-
 
1832
	} else {
-
 
1833
		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
-
 
1834
		kgem->need_throttle = kgem->need_retire = 1;
-
 
1835
	}
-
 
1836
 
-
 
1837
	kgem->next_request = NULL;
-
 
1838
}
-
 
1839
 
-
 
1840
static void kgem_close_list(struct kgem *kgem, struct list *head)
-
 
1841
{
-
 
1842
	while (!list_is_empty(head))
-
 
1843
		kgem_bo_free(kgem, list_first_entry(head, struct kgem_bo, list));
-
 
1844
}
-
 
1845
 
-
 
1846
static void kgem_close_inactive(struct kgem *kgem)
-
 
1847
{
-
 
1848
	unsigned int i;
-
 
1849
 
-
 
1850
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
-
 
1851
		kgem_close_list(kgem, &kgem->inactive[i]);
-
 
1852
}
-
 
1853
 
-
 
1854
static void kgem_finish_buffers(struct kgem *kgem)
-
 
1855
{
-
 
1856
	struct kgem_buffer *bo, *next;
-
 
1857
 
-
 
1858
	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
-
 
1859
		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
-
 
1860
		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
-
 
1861
		     bo->write, bo->mmapped));
-
 
1862
 
-
 
1863
		assert(next->base.list.prev == &bo->base.list);
-
 
1864
		assert(bo->base.io);
-
 
1865
		assert(bo->base.refcnt >= 1);
-
 
1866
 
-
 
1867
		if (!bo->base.exec) {
-
 
1868
			DBG(("%s: skipping unattached handle=%d, used=%d\n",
-
 
1869
			     __FUNCTION__, bo->base.handle, bo->used));
-
 
1870
			continue;
-
 
1871
		}
-
 
1872
 
-
 
1873
		if (!bo->write) {
-
 
1874
			assert(bo->base.exec || bo->base.refcnt > 1);
-
 
1875
			goto decouple;
-
 
1876
		}
-
 
1877
 
-
 
1878
		if (bo->mmapped) {
-
 
1879
			int used;
-
 
1880
 
-
 
1881
			assert(!bo->need_io);
-
 
1882
 
-
 
1883
			used = ALIGN(bo->used, PAGE_SIZE);
-
 
1884
			if (!DBG_NO_UPLOAD_ACTIVE &&
-
 
1885
			    used + PAGE_SIZE <= bytes(&bo->base) &&
-
 
1886
			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) {
-
 
1887
				DBG(("%s: retaining upload buffer (%d/%d)\n",
-
 
1888
				     __FUNCTION__, bo->used, bytes(&bo->base)));
-
 
1889
				bo->used = used;
-
 
1890
				list_move(&bo->base.list,
-
 
1891
					  &kgem->active_buffers);
-
 
1892
				continue;
-
 
1893
			}
-
 
1894
			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
-
 
1895
			     __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map)));
-
 
1896
			goto decouple;
-
 
1897
		}
-
 
1898
 
-
 
1899
		if (!bo->used) {
-
 
1900
			/* Unless we replace the handle in the execbuffer,
-
 
1901
			 * then this bo will become active. So decouple it
-
 
1902
			 * from the buffer list and track it in the normal
-
 
1903
			 * manner.
-
 
1904
			 */
-
 
1905
			goto decouple;
-
 
1906
		}
-
 
1907
 
-
 
1908
		assert(bo->need_io);
-
 
1909
		assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
-
 
1910
		assert(bo->base.domain != DOMAIN_GPU);
-
 
1911
 
-
 
1912
		if (bo->base.refcnt == 1 &&
-
 
1913
		    bo->base.size.pages.count > 1 &&
-
 
1914
		    bo->used < bytes(&bo->base) / 2) {
-
 
1915
			struct kgem_bo *shrink;
-
 
1916
			unsigned alloc = NUM_PAGES(bo->used);
-
 
1917
 
-
 
1918
			shrink = search_snoop_cache(kgem, alloc,
-
 
1919
						    CREATE_INACTIVE | CREATE_NO_RETIRE);
-
 
1920
			if (shrink) {
-
 
1921
				void *map;
-
 
1922
				int n;
-
 
1923
 
-
 
1924
				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
-
 
1925
				     __FUNCTION__,
-
 
1926
				     bo->used, bytes(&bo->base), bytes(shrink),
-
 
1927
				     bo->base.handle, shrink->handle));
-
 
1928
 
-
 
1929
				assert(bo->used <= bytes(shrink));
-
 
1930
				map = kgem_bo_map__cpu(kgem, shrink);
-
 
1931
				if (map) {
-
 
1932
					kgem_bo_sync__cpu(kgem, shrink);
-
 
1933
					memcpy(map, bo->mem, bo->used);
-
 
1934
 
-
 
1935
					shrink->target_handle =
-
 
1936
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
-
 
1937
					for (n = 0; n < kgem->nreloc; n++) {
-
 
1938
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
-
 
1939
							kgem->reloc[n].target_handle = shrink->target_handle;
-
 
1940
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
-
 
1941
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
-
 
1942
								kgem->reloc[n].delta + shrink->presumed_offset;
-
 
1943
						}
-
 
1944
					}
-
 
1945
 
-
 
1946
					bo->base.exec->handle = shrink->handle;
-
 
1947
					bo->base.exec->offset = shrink->presumed_offset;
-
 
1948
					shrink->exec = bo->base.exec;
-
 
1949
					shrink->rq = bo->base.rq;
-
 
1950
					list_replace(&bo->base.request,
-
 
1951
						     &shrink->request);
-
 
1952
					list_init(&bo->base.request);
-
 
1953
					shrink->needs_flush = bo->base.dirty;
-
 
1954
 
-
 
1955
					bo->base.exec = NULL;
-
 
1956
					bo->base.rq = NULL;
-
 
1957
					bo->base.dirty = false;
-
 
1958
					bo->base.needs_flush = false;
-
 
1959
					bo->used = 0;
-
 
1960
 
-
 
1961
					goto decouple;
-
 
1962
				}
-
 
1963
 
-
 
1964
				__kgem_bo_destroy(kgem, shrink);
-
 
1965
			}
-
 
1966
 
-
 
1967
			shrink = search_linear_cache(kgem, alloc,
-
 
1968
						     CREATE_INACTIVE | CREATE_NO_RETIRE);
-
 
1969
			if (shrink) {
-
 
1970
				int n;
-
 
1971
 
-
 
1972
				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
-
 
1973
				     __FUNCTION__,
-
 
1974
				     bo->used, bytes(&bo->base), bytes(shrink),
-
 
1975
				     bo->base.handle, shrink->handle));
-
 
1976
 
-
 
1977
				assert(bo->used <= bytes(shrink));
-
 
1978
				if (gem_write(kgem->fd, shrink->handle,
-
 
1979
					      0, bo->used, bo->mem) == 0) {
-
 
1980
					shrink->target_handle =
-
 
1981
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
-
 
1982
					for (n = 0; n < kgem->nreloc; n++) {
-
 
1983
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
-
 
1984
							kgem->reloc[n].target_handle = shrink->target_handle;
-
 
1985
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
-
 
1986
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
-
 
1987
								kgem->reloc[n].delta + shrink->presumed_offset;
-
 
1988
						}
-
 
1989
					}
-
 
1990
 
-
 
1991
					bo->base.exec->handle = shrink->handle;
-
 
1992
					bo->base.exec->offset = shrink->presumed_offset;
-
 
1993
					shrink->exec = bo->base.exec;
-
 
1994
					shrink->rq = bo->base.rq;
-
 
1995
					list_replace(&bo->base.request,
-
 
1996
						     &shrink->request);
-
 
1997
					list_init(&bo->base.request);
-
 
1998
					shrink->needs_flush = bo->base.dirty;
-
 
1999
 
-
 
2000
					bo->base.exec = NULL;
-
 
2001
					bo->base.rq = NULL;
-
 
2002
					bo->base.dirty = false;
-
 
2003
					bo->base.needs_flush = false;
-
 
2004
					bo->used = 0;
-
 
2005
 
-
 
2006
					goto decouple;
-
 
2007
				}
-
 
2008
 
-
 
2009
				__kgem_bo_destroy(kgem, shrink);
-
 
2010
			}
-
 
2011
		}
-
 
2012
 
-
 
2013
		DBG(("%s: handle=%d, uploading %d/%d\n",
-
 
2014
		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
-
 
2015
		ASSERT_IDLE(kgem, bo->base.handle);
-
 
2016
		assert(bo->used <= bytes(&bo->base));
-
 
2017
		gem_write(kgem->fd, bo->base.handle,
-
 
2018
			  0, bo->used, bo->mem);
-
 
2019
		bo->need_io = 0;
-
 
2020
 
-
 
2021
decouple:
-
 
2022
		DBG(("%s: releasing handle=%d\n",
-
 
2023
		     __FUNCTION__, bo->base.handle));
-
 
2024
		list_del(&bo->base.list);
-
 
2025
		kgem_bo_unref(kgem, &bo->base);
-
 
2026
	}
-
 
2027
}
-
 
2028
 
-
 
2029
static void kgem_cleanup(struct kgem *kgem)
-
 
2030
{
-
 
2031
	int n;
-
 
2032
 
-
 
2033
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
-
 
2034
		while (!list_is_empty(&kgem->requests[n])) {
-
 
2035
			struct kgem_request *rq;
-
 
2036
 
-
 
2037
			rq = list_first_entry(&kgem->requests[n],
-
 
2038
					      struct kgem_request,
-
 
2039
					      list);
-
 
2040
			while (!list_is_empty(&rq->buffers)) {
-
 
2041
				struct kgem_bo *bo;
-
 
2042
 
-
 
2043
				bo = list_first_entry(&rq->buffers,
-
 
2044
						      struct kgem_bo,
-
 
2045
						      request);
-
 
2046
 
-
 
2047
				bo->exec = NULL;
-
 
2048
				bo->dirty = false;
-
 
2049
				__kgem_bo_clear_busy(bo);
-
 
2050
				if (bo->refcnt == 0)
-
 
2051
					kgem_bo_free(kgem, bo);
-
 
2052
			}
-
 
2053
 
-
 
2054
			__kgem_request_free(rq);
-
 
2055
		}
-
 
2056
	}
-
 
2057
 
-
 
2058
	kgem_close_inactive(kgem);
-
 
2059
}
-
 
2060
 
-
 
2061
static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
-
 
2062
{
-
 
2063
	int ret;
-
 
2064
 
-
 
2065
	ASSERT_IDLE(kgem, handle);
-
 
2066
 
-
 
2067
	/* If there is no surface data, just upload the batch */
-
 
2068
	if (kgem->surface == kgem->batch_size)
-
 
2069
		return gem_write(kgem->fd, handle,
-
 
2070
				 0, sizeof(uint32_t)*kgem->nbatch,
-
 
2071
				 kgem->batch);
-
 
2072
 
-
 
2073
	/* Are the batch pages conjoint with the surface pages? */
-
 
2074
	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
-
 
2075
		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
-
 
2076
		return gem_write(kgem->fd, handle,
-
 
2077
				 0, kgem->batch_size*sizeof(uint32_t),
-
 
2078
				 kgem->batch);
-
 
2079
	}
-
 
2080
 
-
 
2081
	/* Disjoint surface/batch, upload separately */
-
 
2082
	ret = gem_write(kgem->fd, handle,
-
 
2083
			0, sizeof(uint32_t)*kgem->nbatch,
-
 
2084
			kgem->batch);
-
 
2085
	if (ret)
-
 
2086
		return ret;
-
 
2087
 
-
 
2088
	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
-
 
2089
	ret -= sizeof(uint32_t) * kgem->surface;
-
 
2090
	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
-
 
2091
	return __gem_write(kgem->fd, handle,
-
 
2092
			size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
-
 
2093
			kgem->batch + kgem->surface);
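
A worked example of the disjoint-path arithmetic just above; the numbers are chosen for illustration and are not taken from the driver:

    /* Suppose batch_size = 4096 dwords, surface = 3500 and nbatch = 100
     * (illustrative values only), with size as returned by
     * compact_batch_surface():
     *
     *   ret  = PAGE_ALIGN(4 * 4096)   = 16384 bytes
     *   ret -= 4 * 3500               = 16384 - 14000 = 2384 bytes
     *
     * which is exactly the byte length of the surface block,
     * (4096 - 3500) * 4 = 2384.  The final __gem_write() therefore copies
     * the 596 surface dwords to offset size - 2384, i.e. flush against the
     * end of the batch bo, while the first gem_write() already copied the
     * 100 batch dwords to offset 0.
     */
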
-
 
2094
}
-
 
2095
 
-
 
2096
void kgem_reset(struct kgem *kgem)
-
 
2097
{
-
 
2098
	if (kgem->next_request) {
-
 
2099
		struct kgem_request *rq = kgem->next_request;
-
 
2100
 
-
 
2101
		while (!list_is_empty(&rq->buffers)) {
-
 
2102
			struct kgem_bo *bo =
-
 
2103
				list_first_entry(&rq->buffers,
-
 
2104
						 struct kgem_bo,
-
 
2105
						 request);
-
 
2106
			list_del(&bo->request);
-
 
2107
 
-
 
2108
			assert(RQ(bo->rq) == rq);
-
 
2109
 
-
 
2110
			bo->binding.offset = 0;
-
 
2111
			bo->exec = NULL;
-
 
2112
			bo->target_handle = -1;
-
 
2113
			bo->dirty = false;
-
 
2114
 
-
 
2115
			if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
-
 
2116
				list_add(&bo->request, &kgem->flushing);
-
 
2117
				bo->rq = (void *)kgem;
-
 
2118
			} else
-
 
2119
				__kgem_bo_clear_busy(bo);
-
 
2120
 
-
 
2121
			if (!bo->refcnt && !bo->reusable) {
-
 
2122
				assert(!bo->snoop);
-
 
2123
				DBG(("%s: discarding handle=%d\n",
-
 
2124
				     __FUNCTION__, bo->handle));
-
 
2125
				kgem_bo_free(kgem, bo);
-
 
2126
			}
-
 
2127
		}
-
 
2128
 
-
 
2129
		if (rq != &kgem->static_request) {
-
 
2130
			list_init(&rq->list);
-
 
2131
			__kgem_request_free(rq);
-
 
2132
		}
-
 
2133
	}
-
 
2134
 
-
 
2135
	kgem->nfence = 0;
-
 
2136
	kgem->nexec = 0;
-
 
2137
	kgem->nreloc = 0;
-
 
2138
	kgem->nreloc__self = 0;
-
 
2139
	kgem->aperture = 0;
-
 
2140
	kgem->aperture_fenced = 0;
-
 
2141
	kgem->nbatch = 0;
-
 
2142
	kgem->surface = kgem->batch_size;
-
 
2143
	kgem->mode = KGEM_NONE;
-
 
2144
	kgem->flush = 0;
-
 
2145
	kgem->batch_flags = kgem->batch_flags_base;
-
 
2146
 
-
 
2147
	kgem->next_request = __kgem_request_alloc(kgem);
-
 
2148
 
-
 
2149
	kgem_sna_reset(kgem);
-
 
2150
}
-
 
2151
 
-
 
2152
static int compact_batch_surface(struct kgem *kgem)
-
 
2153
{
-
 
2154
	int size, shrink, n;
-
 
2155
 
-
 
2156
	if (!kgem->has_relaxed_delta)
-
 
2157
		return kgem->batch_size;
-
 
2158
 
-
 
2159
	/* See if we can pack the contents into one or two pages */
-
 
2160
	n = ALIGN(kgem->batch_size, 1024);
-
 
2161
	size = n - kgem->surface + kgem->nbatch;
-
 
2162
	size = ALIGN(size, 1024);
-
 
2163
 
-
 
2164
	shrink = n - size;
-
 
2165
	if (shrink) {
-
 
2166
		DBG(("shrinking from %d to %d\n", kgem->batch_size, size));
-
 
2167
 
-
 
2168
		shrink *= sizeof(uint32_t);
-
 
2169
		for (n = 0; n < kgem->nreloc; n++) {
-
 
2170
			if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
-
 
2171
			    kgem->reloc[n].target_handle == ~0U)
-
 
2172
				kgem->reloc[n].delta -= shrink;
-
 
2173
 
-
 
2174
			if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
-
 
2175
				kgem->reloc[n].offset -= shrink;
-
 
2176
		}
-
 
2177
	}
-
 
2178
 
-
 
2179
	return size * sizeof(uint32_t);
-
 
2180
}
-
 
2181
 
-
 
2182
static struct kgem_bo *
-
 
2183
kgem_create_batch(struct kgem *kgem, int size)
-
 
2184
{
-
 
2185
	struct drm_i915_gem_set_domain set_domain;
-
 
2186
	struct kgem_bo *bo;
-
 
2187
 
-
 
2188
	if (size <= 4096) {
-
 
2189
		bo = list_first_entry(&kgem->pinned_batches[0],
-
 
2190
				      struct kgem_bo,
-
 
2191
				      list);
-
 
2192
		if (!bo->rq) {
-
 
2193
out_4096:
-
 
2194
			list_move_tail(&bo->list, &kgem->pinned_batches[0]);
-
 
2195
			return kgem_bo_reference(bo);
-
 
2196
		}
-
 
2197
 
-
 
2198
		if (!__kgem_busy(kgem, bo->handle)) {
-
 
2199
			assert(RQ(bo->rq)->bo == bo);
-
 
2200
			__kgem_retire_rq(kgem, RQ(bo->rq));
-
 
2201
			goto out_4096;
-
 
2202
		}
-
 
2203
	}
-
 
2204
 
-
 
2205
	if (size <= 16384) {
-
 
2206
		bo = list_first_entry(&kgem->pinned_batches[1],
-
 
2207
				      struct kgem_bo,
-
 
2208
				      list);
-
 
2209
		if (!bo->rq) {
-
 
2210
out_16384:
-
 
2211
			list_move_tail(&bo->list, &kgem->pinned_batches[1]);
-
 
2212
			return kgem_bo_reference(bo);
-
 
2213
		}
-
 
2214
 
-
 
2215
		if (!__kgem_busy(kgem, bo->handle)) {
-
 
2216
			assert(RQ(bo->rq)->bo == bo);
-
 
2217
			__kgem_retire_rq(kgem, RQ(bo->rq));
-
 
2218
			goto out_16384;
-
 
2219
		}
-
 
2220
	}
-
 
2221
 
-
 
2222
	if (kgem->gen == 020 && !kgem->has_pinned_batches) {
-
 
2223
		assert(size <= 16384);
-
 
2224
 
-
 
2225
		bo = list_first_entry(&kgem->pinned_batches[size > 4096],
-
 
2226
				      struct kgem_bo,
-
 
2227
				      list);
-
 
2228
		list_move_tail(&bo->list, &kgem->pinned_batches[size > 4096]);
-
 
2229
 
-
 
2230
		DBG(("%s: syncing due to busy batches\n", __FUNCTION__));
-
 
2231
 
-
 
2232
		VG_CLEAR(set_domain);
-
 
2233
		set_domain.handle = bo->handle;
-
 
2234
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-
 
2235
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
 
2236
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
-
 
2237
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
-
 
2238
			kgem_throttle(kgem);
-
 
2239
			return NULL;
-
 
2240
		}
-
 
2241
 
-
 
2242
		kgem_retire(kgem);
-
 
2243
		assert(bo->rq == NULL);
-
 
2244
		return kgem_bo_reference(bo);
-
 
2245
	}
-
 
2246
 
-
 
2247
	return kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
-
 
2248
}
-
 
2249
 
-
 
2250
void _kgem_submit(struct kgem *kgem)
-
 
2251
{
-
 
2252
	struct kgem_request *rq;
-
 
2253
	uint32_t batch_end;
-
 
2254
	int size;
-
 
2255
 
-
 
2256
	assert(!DBG_NO_HW);
-
 
2257
	assert(!kgem->wedged);
-
 
2258
 
-
 
2259
	assert(kgem->nbatch);
-
 
2260
	assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
-
 
2261
	assert(kgem->nbatch <= kgem->surface);
-
 
2262
 
-
 
2263
	batch_end = kgem_end_batch(kgem);
-
 
2264
	kgem_sna_flush(kgem);
-
 
2265
 
-
 
2266
	DBG(("batch[%d/%d]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
-
 
2267
	     kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
-
 
2268
	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));
-
 
2269
 
-
 
2270
	assert(kgem->nbatch <= kgem->batch_size);
-
 
2271
	assert(kgem->nbatch <= kgem->surface);
-
 
2272
	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
-
 
2273
	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
-
 
2274
	assert(kgem->nfence <= kgem->fence_max);
-
 
2275
 
-
 
2276
	kgem_finish_buffers(kgem);
-
 
2277
 
-
 
2278
#if SHOW_BATCH
-
 
2279
	__kgem_batch_debug(kgem, batch_end);
-
 
2280
#endif
-
 
2281
 
-
 
2282
	rq = kgem->next_request;
-
 
2283
	if (kgem->surface != kgem->batch_size)
-
 
2284
		size = compact_batch_surface(kgem);
-
 
2285
	else
-
 
2286
		size = kgem->nbatch * sizeof(kgem->batch[0]);
-
 
2287
	rq->bo = kgem_create_batch(kgem, size);
-
 
2288
	if (rq->bo) {
-
 
2289
		uint32_t handle = rq->bo->handle;
-
 
2290
		int i;
-
 
2291
 
-
 
2292
		assert(!rq->bo->needs_flush);
-
 
2293
 
-
 
2294
		i = kgem->nexec++;
-
 
2295
		kgem->exec[i].handle = handle;
-
 
2296
		kgem->exec[i].relocation_count = kgem->nreloc;
-
 
2297
		kgem->exec[i].relocs_ptr = (uintptr_t)kgem->reloc;
-
 
2298
		kgem->exec[i].alignment = 0;
-
 
2299
		kgem->exec[i].offset = rq->bo->presumed_offset;
-
 
2300
		kgem->exec[i].flags = 0;
-
 
2301
		kgem->exec[i].rsvd1 = 0;
-
 
2302
		kgem->exec[i].rsvd2 = 0;
-
 
2303
 
-
 
2304
		rq->bo->target_handle = kgem->has_handle_lut ? i : handle;
-
 
2305
		rq->bo->exec = &kgem->exec[i];
-
 
2306
		rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
-
 
2307
		list_add(&rq->bo->request, &rq->buffers);
-
 
2308
		rq->ring = kgem->ring == KGEM_BLT;
-
 
2309
 
-
 
2310
		kgem_fixup_self_relocs(kgem, rq->bo);
-
 
2311
 
-
 
2312
		if (kgem_batch_write(kgem, handle, size) == 0) {
-
 
2313
			struct drm_i915_gem_execbuffer2 execbuf;
-
 
2314
			int ret, retry = 3;
-
 
2315
 
-
 
2316
			VG_CLEAR(execbuf);
-
 
2317
			execbuf.buffers_ptr = (uintptr_t)kgem->exec;
-
 
2318
			execbuf.buffer_count = kgem->nexec;
-
 
2319
			execbuf.batch_start_offset = 0;
-
 
2320
			execbuf.batch_len = batch_end*sizeof(uint32_t);
-
 
2321
			execbuf.cliprects_ptr = 0;
-
 
2322
			execbuf.num_cliprects = 0;
-
 
2323
			execbuf.DR1 = 0;
-
 
2324
			execbuf.DR4 = 0;
-
 
2325
			execbuf.flags = kgem->ring | kgem->batch_flags;
-
 
2326
			execbuf.rsvd1 = 0;
-
 
2327
			execbuf.rsvd2 = 0;
-
 
2328
 
-
 
2329
 
-
 
2330
 
-
 
2331
//			ret = drmIoctl(kgem->fd,
-
 
2332
//				       DRM_IOCTL_I915_GEM_EXECBUFFER2,
-
 
2333
//				       &execbuf);
-
 
2334
//			while (ret == -1 && errno == EBUSY && retry--) {
-
 
2335
//				__kgem_throttle(kgem);
-
 
2336
//				ret = drmIoctl(kgem->fd,
-
 
2337
//					       DRM_IOCTL_I915_GEM_EXECBUFFER2,
-
 
2338
//					       &execbuf);
-
 
2339
//			}
-
 
2340
			if (DEBUG_SYNC && ret == 0) {
-
 
2341
				struct drm_i915_gem_set_domain set_domain;
-
 
2342
 
-
 
2343
				VG_CLEAR(set_domain);
-
 
2344
				set_domain.handle = handle;
-
 
2345
				set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-
 
2346
				set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
 
2347
 
-
 
2348
				ret = drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-
 
2349
			}
-
 
2350
			if (ret == -1) {
-
 
2351
//				DBG(("%s: GPU hang detected [%d]\n",
-
 
2352
//				     __FUNCTION__, errno));
-
 
2353
				kgem_throttle(kgem);
-
 
2354
				kgem->wedged = true;
-
 
2355
 
-
 
2356
#if 0
-
 
2357
				ret = errno;
-
 
2358
				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
-
 
2359
				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
-
 
2360
				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);
-
 
2361
 
-
 
2362
				for (i = 0; i < kgem->nexec; i++) {
-
 
2363
					struct kgem_bo *bo, *found = NULL;
-
 
2364
 
-
 
2365
					list_for_each_entry(bo, &kgem->next_request->buffers, request) {
-
 
2366
						if (bo->handle == kgem->exec[i].handle) {
-
 
2367
							found = bo;
-
 
2368
							break;
-
 
2369
						}
-
 
2370
					}
-
 
2371
					ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
-
 
2372
					       i,
-
 
2373
					       kgem->exec[i].handle,
-
 
2374
					       (int)kgem->exec[i].offset,
-
 
2375
					       found ? kgem_bo_size(found) : -1,
-
 
2376
					       found ? found->tiling : -1,
-
 
2377
					       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
-
 
2378
					       found ? found->snoop : -1,
-
 
2379
					       found ? found->purged : -1);
-
 
2380
				}
-
 
2381
				for (i = 0; i < kgem->nreloc; i++) {
-
 
2382
					ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n",
-
 
2383
					       i,
-
 
2384
					       (int)kgem->reloc[i].offset,
-
 
2385
					       kgem->reloc[i].target_handle,
-
 
2386
					       kgem->reloc[i].delta,
-
 
2387
					       kgem->reloc[i].read_domains,
-
 
2388
					       kgem->reloc[i].write_domain,
-
 
2389
					       (int)kgem->reloc[i].presumed_offset);
-
 
2390
				}
-
 
2391
 
-
 
2392
				if (DEBUG_SYNC) {
-
 
2393
					int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
-
 
2394
					if (fd != -1) {
-
 
2395
						write(fd, kgem->batch, batch_end*sizeof(uint32_t));
-
 
2396
						close(fd);
-
 
2397
					}
-
 
2398
 
-
 
2399
					FatalError("SNA: failed to submit batchbuffer, errno=%d\n", ret);
-
 
2400
				}
-
 
2401
#endif
-
 
2402
			}
-
 
2403
		}
-
 
2404
 
-
 
2405
		kgem_commit(kgem);
-
 
2406
	}
-
 
2407
	if (kgem->wedged)
-
 
2408
		kgem_cleanup(kgem);
-
 
2409
 
-
 
2410
	kgem_reset(kgem);
-
 
2411
 
-
 
2412
	assert(kgem->next_request != NULL);
-
 
2413
}
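A hedged usage sketch for the submit path above: callers are expected to filter the conditions that _kgem_submit() asserts on (a non-empty batch, not wedged) before flushing. The helper name is illustrative only and not part of this revision.

#if 0	/* illustrative sketch, not part of this revision */
static void example_flush_batch(struct kgem *kgem)
{
	/* _kgem_submit() asserts kgem->nbatch != 0 and !kgem->wedged,
	 * so the caller checks both cases first. */
	if (kgem->nbatch == 0 || kgem->wedged)
		return;

	_kgem_submit(kgem);	/* writes the batch bo, queues it, then kgem_reset() */
}
#endif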
-
 
2414
 
-
 
2415
void kgem_throttle(struct kgem *kgem)
-
 
2416
{
-
 
2417
	kgem->need_throttle = 0;
-
 
2418
	if (kgem->wedged)
-
 
2419
		return;
-
 
2420
 
-
 
2421
	kgem->wedged = __kgem_throttle(kgem);
-
 
2422
	if (kgem->wedged) {
-
 
2423
		printf("Detected a hung GPU, disabling acceleration.\n");
-
 
2424
		printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
-
 
2425
	}
-
 
2426
}
-
 
2427
 
-
 
2428
void kgem_purge_cache(struct kgem *kgem)
-
 
2429
{
-
 
2430
	struct kgem_bo *bo, *next;
-
 
2431
	int i;
-
 
2432
 
-
 
2433
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
-
 
2434
		list_for_each_entry_safe(bo, next, &kgem->inactive[i], list) {
-
 
2435
			if (!kgem_bo_is_retained(kgem, bo)) {
-
 
2436
				DBG(("%s: purging %d\n",
-
 
2437
				     __FUNCTION__, bo->handle));
-
 
2438
				kgem_bo_free(kgem, bo);
-
 
2439
			}
-
 
2440
		}
-
 
2441
	}
-
 
2442
 
-
 
2443
	kgem->need_purge = false;
-
 
2444
}
-
 
2445
 
-
 
2446
bool kgem_expire_cache(struct kgem *kgem)
-
 
2447
{
-
 
2448
	time_t now, expire;
-
 
2449
	struct kgem_bo *bo;
-
 
2450
	unsigned int size = 0, count = 0;
-
 
2451
	bool idle;
-
 
2452
	unsigned int i;
-
 
2453
 
-
 
2454
	time(&now);
-
 
2455
 
-
 
2456
	while (__kgem_freed_bo) {
-
 
2457
		bo = __kgem_freed_bo;
-
 
2458
		__kgem_freed_bo = *(struct kgem_bo **)bo;
-
 
2459
		free(bo);
-
 
2460
	}
-
 
2461
 
-
 
2462
	while (__kgem_freed_request) {
-
 
2463
		struct kgem_request *rq = __kgem_freed_request;
-
 
2464
		__kgem_freed_request = *(struct kgem_request **)rq;
-
 
2465
		free(rq);
-
 
2466
	}
-
 
2467
 
-
 
2468
	while (!list_is_empty(&kgem->large_inactive)) {
-
 
2469
		kgem_bo_free(kgem,
-
 
2470
			     list_first_entry(&kgem->large_inactive,
-
 
2471
					      struct kgem_bo, list));
-
 
2472
 
-
 
2473
	}
-
 
2474
 
-
 
2475
	while (!list_is_empty(&kgem->scanout)) {
-
 
2476
		bo = list_first_entry(&kgem->scanout, struct kgem_bo, list);
-
 
2477
		if (__kgem_busy(kgem, bo->handle))
-
 
2478
			break;
-
 
2479
 
-
 
2480
		list_del(&bo->list);
-
 
2481
		kgem_bo_clear_scanout(kgem, bo);
-
 
2482
		__kgem_bo_destroy(kgem, bo);
-
 
2483
	}
-
 
2484
 
-
 
2485
	expire = 0;
-
 
2486
	list_for_each_entry(bo, &kgem->snoop, list) {
-
 
2487
		if (bo->delta) {
-
 
2488
			expire = now - MAX_INACTIVE_TIME/2;
-
 
2489
			break;
-
 
2490
		}
-
 
2491
 
-
 
2492
		bo->delta = now;
-
 
2493
	}
-
 
2494
	if (expire) {
-
 
2495
		while (!list_is_empty(&kgem->snoop)) {
-
 
2496
			bo = list_last_entry(&kgem->snoop, struct kgem_bo, list);
-
 
2497
 
-
 
2498
			if (bo->delta > expire)
-
 
2499
				break;
-
 
2500
 
-
 
2501
			kgem_bo_free(kgem, bo);
-
 
2502
		}
-
 
2503
	}
-
 
2504
#ifdef DEBUG_MEMORY
-
 
2505
	{
-
 
2506
		long snoop_size = 0;
-
 
2507
		int snoop_count = 0;
-
 
2508
		list_for_each_entry(bo, &kgem->snoop, list)
-
 
2509
			snoop_count++, snoop_size += bytes(bo);
-
 
2510
		ErrorF("%s: still allocated %d bo, %ld bytes, in snoop cache\n",
-
 
2511
		       __FUNCTION__, snoop_count, snoop_size);
-
 
2512
	}
-
 
2513
#endif
-
 
2514
 
-
 
2515
	kgem_retire(kgem);
-
 
2516
	if (kgem->wedged)
-
 
2517
		kgem_cleanup(kgem);
-
 
2518
 
-
 
2519
	kgem->expire(kgem);
-
 
2520
 
-
 
2521
	if (kgem->need_purge)
-
 
2522
		kgem_purge_cache(kgem);
-
 
2523
 
-
 
2524
	expire = 0;
-
 
2525
 
-
 
2526
	idle = !kgem->need_retire;
-
 
2527
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
-
 
2528
		idle &= list_is_empty(&kgem->inactive[i]);
-
 
2529
		list_for_each_entry(bo, &kgem->inactive[i], list) {
-
 
2530
			if (bo->delta) {
-
 
2531
				expire = now - MAX_INACTIVE_TIME;
-
 
2532
				break;
-
 
2533
			}
-
 
2534
 
-
 
2535
			bo->delta = now;
-
 
2536
		}
-
 
2537
	}
-
 
2538
	if (idle) {
-
 
2539
		DBG(("%s: idle\n", __FUNCTION__));
-
 
2540
		kgem->need_expire = false;
-
 
2541
		return false;
-
 
2542
	}
-
 
2543
	if (expire == 0)
-
 
2544
		return true;
-
 
2545
 
-
 
2546
	idle = !kgem->need_retire;
-
 
2547
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
-
 
2548
		struct list preserve;
-
 
2549
 
-
 
2550
		list_init(&preserve);
-
 
2551
		while (!list_is_empty(&kgem->inactive[i])) {
-
 
2552
			bo = list_last_entry(&kgem->inactive[i],
Line -... Line 2553...
-
 
2553
					     struct kgem_bo, list);
-
 
2554
 
-
 
2555
			if (bo->delta > expire) {
-
 
2556
				idle = false;
Line -... Line 2557...
-
 
2557
				break;
-
 
2558
			}
-
 
2559
 
-
 
2560
			if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) {
-
 
2561
				idle = false;
-
 
2562
				list_move_tail(&bo->list, &preserve);
-
 
2563
			} else {
-
 
2564
				count++;
-
 
2565
				size += bytes(bo);
-
 
2566
				kgem_bo_free(kgem, bo);
-
 
2567
				DBG(("%s: expiring %d\n",
-
 
2568
				     __FUNCTION__, bo->handle));
-
 
2569
			}
-
 
2570
		}
-
 
2571
		if (!list_is_empty(&preserve)) {
-
 
2572
			preserve.prev->next = kgem->inactive[i].next;
-
 
2573
			kgem->inactive[i].next->prev = preserve.prev;
-
 
2574
			kgem->inactive[i].next = preserve.next;
-
 
2575
			preserve.next->prev = &kgem->inactive[i];
-
 
2576
		}
-
 
2577
	}
-
 
2578
 
-
 
2579
#ifdef DEBUG_MEMORY
-
 
2580
	{
-
 
2581
		long inactive_size = 0;
-
 
2582
		int inactive_count = 0;
-
 
2583
		for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
-
 
2584
			list_for_each_entry(bo, &kgem->inactive[i], list)
-
 
2585
				inactive_count++, inactive_size += bytes(bo);
-
 
2586
		ErrorF("%s: still allocated %d bo, %ld bytes, in inactive cache\n",
-
 
2587
		       __FUNCTION__, inactive_count, inactive_size);
-
 
2588
	}
-
 
2589
#endif
-
 
2590
 
-
 
2591
	DBG(("%s: expired %d objects, %d bytes, idle? %d\n",
-
 
2592
	     __FUNCTION__, count, size, idle));
-
 
2593
 
-
 
2594
	kgem->need_expire = !idle;
-
 
2595
	return !idle;
-
 
2596
	(void)count;
-
 
2597
	(void)size;
-
 
2598
}
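A minimal sketch of how the expiry pass above is typically driven from periodic housekeeping; the timer plumbing is assumed, only the return-value contract (true while cached work remains) comes from the function itself.

#if 0	/* illustrative sketch; the periodic-callback mechanism is assumed */
static bool example_housekeeping(struct kgem *kgem)
{
	/* Returns true while inactive/snooped buffers remain and another
	 * pass is wanted; false once the caches are idle (need_expire is
	 * cleared), so the caller may disarm its timer. */
	return kgem_expire_cache(kgem);
}
#endif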
-
 
2599
 
-
 
2600
void kgem_cleanup_cache(struct kgem *kgem)
-
 
2601
{
-
 
2602
	unsigned int i;
-
 
2603
	int n;
-
 
2604
 
-
 
2605
	/* sync to the most recent request */
-
 
2606
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
-
 
2607
		if (!list_is_empty(&kgem->requests[n])) {
-
 
2608
			struct kgem_request *rq;
-
 
2609
			struct drm_i915_gem_set_domain set_domain;
-
 
2610
 
-
 
2611
			rq = list_first_entry(&kgem->requests[n],
-
 
2612
					      struct kgem_request,
-
 
2613
					      list);
-
 
2614
 
-
 
2615
			DBG(("%s: sync on cleanup\n", __FUNCTION__));
-
 
2616
 
-
 
2617
			VG_CLEAR(set_domain);
-
 
2618
			set_domain.handle = rq->bo->handle;
-
 
2619
			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-
 
2620
			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
 
2621
			(void)drmIoctl(kgem->fd,
-
 
2622
				       DRM_IOCTL_I915_GEM_SET_DOMAIN,
-
 
2623
				       &set_domain);
-
 
2624
		}
-
 
2625
	}
-
 
2626
 
-
 
2627
	kgem_retire(kgem);
-
 
2628
	kgem_cleanup(kgem);
-
 
2629
 
-
 
2630
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
-
 
2631
		while (!list_is_empty(&kgem->inactive[i]))
-
 
2632
			kgem_bo_free(kgem,
-
 
2633
				     list_last_entry(&kgem->inactive[i],
-
 
2634
						     struct kgem_bo, list));
-
 
2635
	}
-
 
2636
 
-
 
2637
	while (!list_is_empty(&kgem->snoop))
-
 
2638
		kgem_bo_free(kgem,
-
 
2639
			     list_last_entry(&kgem->snoop,
-
 
2640
					     struct kgem_bo, list));
-
 
2641
 
-
 
2642
	while (__kgem_freed_bo) {
-
 
2643
		struct kgem_bo *bo = __kgem_freed_bo;
-
 
2644
		__kgem_freed_bo = *(struct kgem_bo **)bo;
-
 
2645
		free(bo);
-
 
2646
	}
-
 
2647
 
Line 1020... Line 2648...
1020
 
2648
	kgem->need_purge = false;
1021
 
2649
	kgem->need_expire = false;
1022
 
2650
}
1023
 
2651
 
Line 1251... Line 2879...
1251
 
2879
 
1252
	debug_alloc__bo(kgem, bo);
2880
	debug_alloc__bo(kgem, bo);
1253
	return bo;
2881
	return bo;
Line -... Line 2882...
-
 
2882
}
-
 
2883
 
-
 
2884
inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
-
 
2885
{
-
 
2886
	unsigned int size;
-
 
2887
 
-
 
2888
	assert(bo->tiling);
-
 
2889
	assert(kgem->gen < 040);
-
 
2890
 
-
 
2891
	if (kgem->gen < 030)
-
 
2892
		size = 512 * 1024;
-
 
2893
	else
-
 
2894
		size = 1024 * 1024;
-
 
2895
	while (size < bytes(bo))
-
 
2896
		size *= 2;
-
 
2897
 
-
 
2898
	return size;
-
 
2899
}
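A short worked example of the fence sizing above, following the loop directly (starting value 512KiB below gen3, 1MiB from gen3 up, doubled until the object fits):

#if 0	/* worked example for kgem_bo_fenced_size(); the object size is made up */
	/* bytes(bo) = 1.5 MiB, tiled, gen3:
	 *   size = 1 MiB;  1 MiB < 1.5 MiB  -> size = 2 MiB
	 *   2 MiB >= 1.5 MiB                -> fenced size = 2 MiB
	 * The same object on gen2 walks 512 KiB -> 1 MiB -> 2 MiB. */
#endif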
-
 
2900
 
-
 
2901
#if 0
-
 
2902
 
-
 
2903
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
-
 
2904
			       int width,
-
 
2905
			       int height,
-
 
2906
			       int bpp,
-
 
2907
			       int tiling,
-
 
2908
			       uint32_t flags)
-
 
2909
{
-
 
2910
	struct list *cache;
-
 
2911
	struct kgem_bo *bo;
-
 
2912
	uint32_t pitch, untiled_pitch, tiled_height, size;
-
 
2913
	uint32_t handle;
-
 
2914
	int i, bucket, retry;
-
 
2915
 
-
 
2916
	if (tiling < 0)
-
 
2917
		tiling = -tiling, flags |= CREATE_EXACT;
-
 
2918
 
-
 
2919
	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
-
 
2920
	     width, height, bpp, tiling,
-
 
2921
	     !!(flags & CREATE_EXACT),
-
 
2922
	     !!(flags & CREATE_INACTIVE),
-
 
2923
	     !!(flags & CREATE_CPU_MAP),
-
 
2924
	     !!(flags & CREATE_GTT_MAP),
-
 
2925
	     !!(flags & CREATE_SCANOUT),
-
 
2926
	     !!(flags & CREATE_PRIME),
-
 
2927
	     !!(flags & CREATE_TEMPORARY)));
-
 
2928
 
-
 
2929
	size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
-
 
2930
				 width, height, bpp, tiling, &pitch);
-
 
2931
	assert(size && size <= kgem->max_object_size);
-
 
2932
	size /= PAGE_SIZE;
-
 
2933
	bucket = cache_bucket(size);
-
 
2934
 
-
 
2935
	if (flags & CREATE_SCANOUT) {
-
 
2936
		assert((flags & CREATE_INACTIVE) == 0);
-
 
2937
		list_for_each_entry_reverse(bo, &kgem->scanout, list) {
-
 
2938
			assert(bo->scanout);
-
 
2939
			assert(bo->delta);
-
 
2940
			assert(!bo->purged);
-
 
2941
 
-
 
2942
			if (size > num_pages(bo) || num_pages(bo) > 2*size)
-
 
2943
				continue;
-
 
2944
 
-
 
2945
			if (bo->tiling != tiling ||
-
 
2946
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
-
 
2947
				if (!gem_set_tiling(kgem->fd, bo->handle,
-
 
2948
						    tiling, pitch))
-
 
2949
					continue;
-
 
2950
 
-
 
2951
				bo->tiling = tiling;
-
 
2952
				bo->pitch = pitch;
-
 
2953
			}
-
 
2954
 
-
 
2955
			list_del(&bo->list);
-
 
2956
 
-
 
2957
			bo->unique_id = kgem_get_unique_id(kgem);
-
 
2958
			DBG(("  1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
-
 
2959
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
2960
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
2961
			bo->refcnt = 1;
-
 
2962
			return bo;
-
 
2963
		}
-
 
2964
	}
-
 
2965
 
-
 
2966
	if (bucket >= NUM_CACHE_BUCKETS) {
-
 
2967
		DBG(("%s: large bo num pages=%d, bucket=%d\n",
-
 
2968
		     __FUNCTION__, size, bucket));
-
 
2969
 
-
 
2970
		if (flags & CREATE_INACTIVE)
-
 
2971
			goto large_inactive;
-
 
2972
 
-
 
2973
		tiled_height = kgem_aligned_height(kgem, height, tiling);
-
 
2974
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
-
 
2975
 
-
 
2976
		list_for_each_entry(bo, &kgem->large, list) {
-
 
2977
			assert(!bo->purged);
-
 
2978
			assert(!bo->scanout);
-
 
2979
			assert(bo->refcnt == 0);
-
 
2980
			assert(bo->reusable);
-
 
2981
			assert(bo->flush == true);
-
 
2982
 
-
 
2983
			if (kgem->gen < 040) {
-
 
2984
				if (bo->pitch < pitch) {
-
 
2985
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
-
 
2986
					     bo->tiling, tiling,
-
 
2987
					     bo->pitch, pitch));
-
 
2988
					continue;
-
 
2989
				}
-
 
2990
 
-
 
2991
				if (bo->pitch * tiled_height > bytes(bo))
-
 
2992
					continue;
-
 
2993
			} else {
-
 
2994
				if (num_pages(bo) < size)
-
 
2995
					continue;
-
 
2996
 
-
 
2997
				if (bo->pitch != pitch || bo->tiling != tiling) {
-
 
2998
					if (!gem_set_tiling(kgem->fd, bo->handle,
-
 
2999
							    tiling, pitch))
-
 
3000
						continue;
-
 
3001
 
-
 
3002
					bo->pitch = pitch;
-
 
3003
					bo->tiling = tiling;
-
 
3004
				}
-
 
3005
			}
-
 
3006
 
-
 
3007
			kgem_bo_remove_from_active(kgem, bo);
-
 
3008
 
-
 
3009
			bo->unique_id = kgem_get_unique_id(kgem);
-
 
3010
			bo->delta = 0;
-
 
3011
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
-
 
3012
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
3013
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
3014
			bo->refcnt = 1;
-
 
3015
			return bo;
-
 
3016
		}
-
 
3017
 
-
 
3018
large_inactive:
-
 
3019
		list_for_each_entry(bo, &kgem->large_inactive, list) {
-
 
3020
			assert(bo->refcnt == 0);
-
 
3021
			assert(bo->reusable);
-
 
3022
			assert(!bo->scanout);
-
 
3023
 
-
 
3024
			if (size > num_pages(bo))
-
 
3025
				continue;
-
 
3026
 
-
 
3027
			if (bo->tiling != tiling ||
-
 
3028
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
-
 
3029
				if (!gem_set_tiling(kgem->fd, bo->handle,
-
 
3030
						    tiling, pitch))
-
 
3031
					continue;
-
 
3032
 
-
 
3033
				bo->tiling = tiling;
-
 
3034
				bo->pitch = pitch;
-
 
3035
			}
-
 
3036
 
-
 
3037
			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
-
 
3038
				kgem_bo_free(kgem, bo);
-
 
3039
				break;
-
 
3040
			}
-
 
3041
 
-
 
3042
			list_del(&bo->list);
-
 
3043
 
-
 
3044
			bo->unique_id = kgem_get_unique_id(kgem);
-
 
3045
			bo->pitch = pitch;
-
 
3046
			bo->delta = 0;
-
 
3047
			DBG(("  1:from large inactive: pitch=%d, tiling=%d, handle=%d, id=%d\n",
-
 
3048
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
3049
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
3050
			bo->refcnt = 1;
-
 
3051
			return bo;
-
 
3052
		}
-
 
3053
 
-
 
3054
		goto create;
-
 
3055
	}
-
 
3056
 
-
 
3057
	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
-
 
3058
		int for_cpu = !!(flags & CREATE_CPU_MAP);
-
 
3059
		if (kgem->has_llc && tiling == I915_TILING_NONE)
-
 
3060
			for_cpu = 1;
-
 
3061
		/* We presume that we will need to upload to this bo,
-
 
3062
		 * and so would prefer to have an active VMA.
-
 
3063
		 */
-
 
3064
		cache = &kgem->vma[for_cpu].inactive[bucket];
-
 
3065
		do {
-
 
3066
			list_for_each_entry(bo, cache, vma) {
-
 
3067
				assert(bucket(bo) == bucket);
-
 
3068
				assert(bo->refcnt == 0);
-
 
3069
				assert(!bo->scanout);
-
 
3070
				assert(bo->map);
-
 
3071
				assert(IS_CPU_MAP(bo->map) == for_cpu);
-
 
3072
				assert(bo->rq == NULL);
-
 
3073
				assert(list_is_empty(&bo->request));
-
 
3074
				assert(bo->flush == false);
-
 
3075
 
-
 
3076
				if (size > num_pages(bo)) {
-
 
3077
					DBG(("inactive too small: %d < %d\n",
-
 
3078
					     num_pages(bo), size));
-
 
3079
					continue;
-
 
3080
				}
-
 
3081
 
-
 
3082
				if (bo->tiling != tiling ||
-
 
3083
				    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
-
 
3084
					DBG(("inactive vma with wrong tiling: %d < %d\n",
-
 
3085
					     bo->tiling, tiling));
-
 
3086
					continue;
-
 
3087
				}
-
 
3088
 
-
 
3089
				if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
-
 
3090
					kgem_bo_free(kgem, bo);
-
 
3091
					break;
-
 
3092
				}
-
 
3093
 
-
 
3094
				bo->pitch = pitch;
-
 
3095
				bo->delta = 0;
-
 
3096
				bo->unique_id = kgem_get_unique_id(kgem);
-
 
3097
 
-
 
3098
				kgem_bo_remove_from_inactive(kgem, bo);
-
 
3099
 
-
 
3100
				DBG(("  from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n",
-
 
3101
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
3102
				assert(bo->reusable);
-
 
3103
				assert(bo->domain != DOMAIN_GPU);
-
 
3104
				ASSERT_IDLE(kgem, bo->handle);
-
 
3105
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
3106
				bo->refcnt = 1;
-
 
3107
				return bo;
-
 
3108
			}
-
 
3109
		} while (!list_is_empty(cache) &&
-
 
3110
			 __kgem_throttle_retire(kgem, flags));
-
 
3111
 
-
 
3112
		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
Line -... Line 3113...
-
 
3113
			goto create;
-
 
3114
	}
Line -... Line 3115...
-
 
3115
 
-
 
3116
	if (flags & CREATE_INACTIVE)
-
 
3117
		goto skip_active_search;
-
 
3118
 
-
 
3119
	/* Best active match */
-
 
3120
	retry = NUM_CACHE_BUCKETS - bucket;
-
 
3121
	if (retry > 3 && (flags & CREATE_TEMPORARY) == 0)
-
 
3122
		retry = 3;
-
 
3123
search_again:
-
 
3124
	assert(bucket < NUM_CACHE_BUCKETS);
-
 
3125
	cache = &kgem->active[bucket][tiling];
-
 
3126
	if (tiling) {
-
 
3127
		tiled_height = kgem_aligned_height(kgem, height, tiling);
-
 
3128
		list_for_each_entry(bo, cache, list) {
-
 
3129
			assert(!bo->purged);
-
 
3130
			assert(bo->refcnt == 0);
-
 
3131
			assert(bucket(bo) == bucket);
Line -... Line 3132...
-
 
3132
			assert(bo->reusable);
-
 
3133
			assert(bo->tiling == tiling);
-
 
3134
			assert(bo->flush == false);
-
 
3135
			assert(!bo->scanout);
-
 
3136
 
-
 
3137
			if (kgem->gen < 040) {
-
 
3138
				if (bo->pitch < pitch) {
-
 
3139
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
-
 
3140
					     bo->tiling, tiling,
-
 
3141
					     bo->pitch, pitch));
-
 
3142
					continue;
-
 
3143
				}
-
 
3144
 
-
 
3145
				if (bo->pitch * tiled_height > bytes(bo))
-
 
3146
					continue;
-
 
3147
			} else {
-
 
3148
				if (num_pages(bo) < size)
-
 
3149
					continue;
-
 
3150
 
-
 
3151
				if (bo->pitch != pitch) {
-
 
3152
					if (!gem_set_tiling(kgem->fd,
-
 
3153
							    bo->handle,
-
 
3154
							    tiling, pitch))
-
 
3155
						continue;
-
 
3156
 
-
 
3157
					bo->pitch = pitch;
-
 
3158
				}
-
 
3159
			}
-
 
3160
 
-
 
3161
			kgem_bo_remove_from_active(kgem, bo);
-
 
3162
 
-
 
3163
			bo->unique_id = kgem_get_unique_id(kgem);
-
 
3164
			bo->delta = 0;
-
 
3165
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
-
 
3166
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
3167
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
3168
			bo->refcnt = 1;
-
 
3169
			return bo;
-
 
3170
		}
-
 
3171
	} else {
-
 
3172
		list_for_each_entry(bo, cache, list) {
-
 
3173
			assert(bucket(bo) == bucket);
-
 
3174
			assert(!bo->purged);
-
 
3175
			assert(bo->refcnt == 0);
-
 
3176
			assert(bo->reusable);
-
 
3177
			assert(!bo->scanout);
-
 
3178
			assert(bo->tiling == tiling);
-
 
3179
			assert(bo->flush == false);
-
 
3180
 
-
 
3181
			if (num_pages(bo) < size)
-
 
3182
				continue;
-
 
3183
 
-
 
3184
			kgem_bo_remove_from_active(kgem, bo);
-
 
3185
 
-
 
3186
			bo->pitch = pitch;
-
 
3187
			bo->unique_id = kgem_get_unique_id(kgem);
-
 
3188
			bo->delta = 0;
-
 
3189
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
-
 
3190
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
3191
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
3192
			bo->refcnt = 1;
-
 
3193
			return bo;
-
 
3194
		}
-
 
3195
	}
-
 
3196
 
-
 
3197
	if (--retry && flags & CREATE_EXACT) {
-
 
3198
		if (kgem->gen >= 040) {
-
 
3199
			for (i = I915_TILING_NONE; i <= I915_TILING_Y; i++) {
-
 
3200
				if (i == tiling)
-
 
3201
					continue;
-
 
3202
 
-
 
3203
				cache = &kgem->active[bucket][i];
-
 
3204
				list_for_each_entry(bo, cache, list) {
-
 
3205
					assert(!bo->purged);
-
 
3206
					assert(bo->refcnt == 0);
-
 
3207
					assert(bo->reusable);
-
 
3208
					assert(!bo->scanout);
-
 
3209
					assert(bo->flush == false);
-
 
3210
 
-
 
3211
					if (num_pages(bo) < size)
-
 
3212
						continue;
-
 
3213
 
-
 
3214
					if (!gem_set_tiling(kgem->fd,
-
 
3215
							    bo->handle,
-
 
3216
							    tiling, pitch))
-
 
3217
						continue;
-
 
3218
 
-
 
3219
					kgem_bo_remove_from_active(kgem, bo);
-
 
3220
 
-
 
3221
					bo->unique_id = kgem_get_unique_id(kgem);
-
 
3222
					bo->pitch = pitch;
-
 
3223
					bo->tiling = tiling;
-
 
3224
					bo->delta = 0;
-
 
3225
					DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
-
 
3226
					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
3227
					assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
3228
					bo->refcnt = 1;
-
 
3229
					return bo;
-
 
3230
				}
-
 
3231
			}
-
 
3232
		}
-
 
3233
 
-
 
3234
		bucket++;
-
 
3235
		goto search_again;
-
 
3236
	}
-
 
3237
 
-
 
3238
	if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
-
 
3239
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
-
 
3240
		i = tiling;
-
 
3241
		while (--i >= 0) {
-
 
3242
			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
-
 
3243
							 width, height, bpp, tiling, &pitch);
-
 
3244
			cache = active(kgem, tiled_height / PAGE_SIZE, i);
-
 
3245
			tiled_height = kgem_aligned_height(kgem, height, i);
-
 
3246
			list_for_each_entry(bo, cache, list) {
-
 
3247
				assert(!bo->purged);
-
 
3248
				assert(bo->refcnt == 0);
-
 
3249
				assert(bo->reusable);
-
 
3250
				assert(!bo->scanout);
-
 
3251
				assert(bo->flush == false);
-
 
3252
 
-
 
3253
				if (bo->tiling) {
-
 
3254
					if (bo->pitch < pitch) {
-
 
3255
						DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
-
 
3256
						     bo->tiling, tiling,
-
 
3257
						     bo->pitch, pitch));
-
 
3258
						continue;
-
 
3259
					}
-
 
3260
				} else
-
 
3261
					bo->pitch = untiled_pitch;
-
 
3262
 
-
 
3263
				if (bo->pitch * tiled_height > bytes(bo))
-
 
3264
					continue;
-
 
3265
 
-
 
3266
				kgem_bo_remove_from_active(kgem, bo);
-
 
3267
 
-
 
3268
				bo->unique_id = kgem_get_unique_id(kgem);
-
 
3269
				bo->delta = 0;
-
 
3270
				DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
-
 
3271
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
3272
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
3273
				bo->refcnt = 1;
-
 
3274
				return bo;
-
 
3275
			}
-
 
3276
		}
-
 
3277
	}
-
 
3278
 
-
 
3279
skip_active_search:
-
 
3280
	bucket = cache_bucket(size);
-
 
3281
	retry = NUM_CACHE_BUCKETS - bucket;
-
 
3282
	if (retry > 3)
-
 
3283
		retry = 3;
-
 
3284
search_inactive:
-
 
3285
	/* Now just look for a close match and prefer any currently active */
-
 
3286
	assert(bucket < NUM_CACHE_BUCKETS);
-
 
3287
	cache = &kgem->inactive[bucket];
-
 
3288
	list_for_each_entry(bo, cache, list) {
-
 
3289
		assert(bucket(bo) == bucket);
-
 
3290
		assert(bo->reusable);
-
 
3291
		assert(!bo->scanout);
-
 
3292
		assert(bo->flush == false);
-
 
3293
 
-
 
3294
		if (size > num_pages(bo)) {
-
 
3295
			DBG(("inactive too small: %d < %d\n",
-
 
3296
			     num_pages(bo), size));
-
 
3297
			continue;
-
 
3298
		}
-
 
3299
 
-
 
3300
		if (bo->tiling != tiling ||
-
 
3301
		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
-
 
3302
			if (!gem_set_tiling(kgem->fd, bo->handle,
-
 
3303
					    tiling, pitch))
-
 
3304
				continue;
-
 
3305
 
-
 
3306
			if (bo->map)
-
 
3307
				kgem_bo_release_map(kgem, bo);
-
 
3308
		}
-
 
3309
 
-
 
3310
		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
-
 
3311
			kgem_bo_free(kgem, bo);
-
 
3312
			break;
-
 
3313
		}
-
 
3314
 
-
 
3315
		kgem_bo_remove_from_inactive(kgem, bo);
-
 
3316
 
-
 
3317
		bo->pitch = pitch;
-
 
3318
		bo->tiling = tiling;
-
 
3319
 
-
 
3320
		bo->delta = 0;
-
 
3321
		bo->unique_id = kgem_get_unique_id(kgem);
-
 
3322
		assert(bo->pitch);
-
 
3323
		DBG(("  from inactive: pitch=%d, tiling=%d: handle=%d, id=%d\n",
-
 
3324
		     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
3325
		assert(bo->refcnt == 0);
-
 
3326
		assert(bo->reusable);
-
 
3327
		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
-
 
3328
		ASSERT_MAYBE_IDLE(kgem, bo->handle, flags & CREATE_INACTIVE);
-
 
3329
		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
3330
		bo->refcnt = 1;
-
 
3331
		return bo;
-
 
3332
	}
-
 
3333
 
-
 
3334
	if (flags & CREATE_INACTIVE &&
-
 
3335
	    !list_is_empty(&kgem->active[bucket][tiling]) &&
-
 
3336
	    __kgem_throttle_retire(kgem, flags)) {
-
 
3337
		flags &= ~CREATE_INACTIVE;
-
 
3338
		goto search_inactive;
-
 
3339
	}
-
 
3340
 
-
 
3341
	if (--retry) {
-
 
3342
		bucket++;
-
 
3343
		flags &= ~CREATE_INACTIVE;
-
 
3344
		goto search_inactive;
-
 
3345
	}
-
 
3346
 
-
 
3347
create:
-
 
3348
	if (bucket >= NUM_CACHE_BUCKETS)
-
 
3349
		size = ALIGN(size, 1024);
-
 
3350
	handle = gem_create(kgem->fd, size);
-
 
3351
	if (handle == 0)
-
 
3352
		return NULL;
-
 
3353
 
-
 
3354
	bo = __kgem_bo_alloc(handle, size);
-
 
3355
	if (!bo) {
-
 
3356
		gem_close(kgem->fd, handle);
-
 
3357
		return NULL;
-
 
3358
	}
-
 
3359
 
-
 
3360
	bo->domain = DOMAIN_CPU;
-
 
3361
	bo->unique_id = kgem_get_unique_id(kgem);
-
 
3362
	bo->pitch = pitch;
-
 
3363
	if (tiling != I915_TILING_NONE &&
-
 
3364
	    gem_set_tiling(kgem->fd, handle, tiling, pitch))
-
 
3365
		bo->tiling = tiling;
-
 
3366
	if (bucket >= NUM_CACHE_BUCKETS) {
-
 
3367
		DBG(("%s: marking large bo for automatic flushing\n",
-
 
3368
		     __FUNCTION__));
-
 
3369
		bo->flush = true;
-
 
3370
	}
-
 
3371
 
-
 
3372
	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
-
 
3373
 
-
 
3374
	debug_alloc__bo(kgem, bo);
-
 
3375
 
-
 
3376
	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
-
 
3377
	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
-
 
3378
	     size, num_pages(bo), bucket(bo)));
-
 
3379
	return bo;
-
 
3380
}
-
 
3381
 
-
 
3382
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
-
 
3383
				   int width,
-
 
3384
				   int height,
-
 
3385
				   int bpp,
-
 
3386
				   uint32_t flags)
-
 
3387
{
-
 
3388
	struct kgem_bo *bo;
-
 
3389
	int stride, size;
-
 
3390
 
-
 
3391
	if (DBG_NO_CPU)
-
 
3392
		return NULL;
-
 
3393
 
-
 
3394
	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));
-
 
3395
 
-
 
3396
	if (kgem->has_llc) {
-
 
3397
		bo = kgem_create_2d(kgem, width, height, bpp,
-
 
3398
				    I915_TILING_NONE, flags);
-
 
3399
		if (bo == NULL)
-
 
3400
			return bo;
-
 
3401
 
-
 
3402
		assert(bo->tiling == I915_TILING_NONE);
-
 
3403
 
-
 
3404
		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
-
 
3405
			kgem_bo_destroy(kgem, bo);
-
 
3406
			return NULL;
-
 
3407
		}
-
 
3408
 
-
 
3409
		return bo;
-
 
3410
	}
-
 
3411
 
-
 
3412
	assert(width > 0 && height > 0);
-
 
3413
	stride = ALIGN(width, 2) * bpp >> 3;
-
 
3414
	stride = ALIGN(stride, 4);
-
 
3415
	size = stride * ALIGN(height, 2);
-
 
3416
	assert(size >= PAGE_SIZE);
-
 
3417
 
-
 
3418
	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
-
 
3419
	     __FUNCTION__, width, height, bpp, stride));
-
 
3420
 
-
 
3421
	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
-
 
3422
	if (bo) {
-
 
3423
		assert(bo->tiling == I915_TILING_NONE);
-
 
3424
		assert(bo->snoop);
-
 
3425
		bo->refcnt = 1;
-
 
3426
		bo->pitch = stride;
-
 
3427
		bo->unique_id = kgem_get_unique_id(kgem);
-
 
3428
		return bo;
-
 
3429
	}
-
 
3430
 
-
 
3431
	if (kgem->has_cacheing) {
-
 
3432
		bo = kgem_create_linear(kgem, size, flags);
-
 
3433
		if (bo == NULL)
-
 
3434
			return NULL;
-
 
3435
 
-
 
3436
		assert(bo->tiling == I915_TILING_NONE);
-
 
3437
 
-
 
3438
		if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED)) {
-
 
3439
			kgem_bo_destroy(kgem, bo);
-
 
3440
			return NULL;
-
 
3441
		}
-
 
3442
		bo->snoop = true;
-
 
3443
 
-
 
3444
		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
-
 
3445
			kgem_bo_destroy(kgem, bo);
-
 
3446
			return NULL;
-
 
3447
		}
-
 
3448
 
-
 
3449
		bo->pitch = stride;
-
 
3450
		bo->unique_id = kgem_get_unique_id(kgem);
-
 
3451
		return bo;
-
 
3452
	}
-
 
3453
 
-
 
3454
	if (kgem->has_userptr) {
-
 
3455
		void *ptr;
-
 
3456
 
-
 
3457
		/* XXX */
-
 
3458
		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
-
 
3459
		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
-
 
3460
			return NULL;
-
 
3461
 
-
 
3462
		bo = kgem_create_map(kgem, ptr, size, false);
-
 
3463
		if (bo == NULL) {
-
 
3464
			free(ptr);
-
 
3465
			return NULL;
-
 
3466
		}
-
 
3467
 
-
 
3468
		bo->map = MAKE_USER_MAP(ptr);
-
 
3469
		bo->pitch = stride;
-
 
3470
		bo->unique_id = kgem_get_unique_id(kgem);
-
 
3471
		return bo;
-
 
3472
	}
-
 
3473
 
-
 
3474
	return NULL;
-
 
3475
}
-
 
3476
 
-
 
3477
 
-
 
3478
#endif
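For reference, a condensed sketch of the snooped-allocation branch inside the disabled kgem_create_cpu_2d() above; it only rearranges calls that already appear in that block (kgem_create_linear, gem_set_cacheing, kgem_bo_map__cpu) and is not active code. The flags value passed to kgem_create_linear is an assumption.

#if 0	/* condensed sketch of the snooped path; flags value is an assumption */
static struct kgem_bo *example_create_snooped(struct kgem *kgem, int size)
{
	struct kgem_bo *bo = kgem_create_linear(kgem, size, 0);
	if (bo == NULL)
		return NULL;

	/* Keep the pages cache-coherent with the CPU, then map them. */
	if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED) ||
	    kgem_bo_map__cpu(kgem, bo) == NULL) {
		kgem_bo_destroy(kgem, bo);
		return NULL;
	}

	bo->snoop = true;
	return bo;
}
#endif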
-
 
3479
 
-
 
3480
 
-
 
3481
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
-
 
3482
{
-
 
3483
	DBG(("%s: handle=%d, proxy? %d\n",
-
 
3484
	     __FUNCTION__, bo->handle, bo->proxy != NULL));
-
 
3485
 
-
 
3486
	if (bo->proxy) {
-
 
3487
		_list_del(&bo->vma);
-
 
3488
		_list_del(&bo->request);
-
 
3489
		if (bo->io && bo->exec == NULL)
-
 
3490
			_kgem_bo_delete_buffer(kgem, bo);
-
 
3491
		kgem_bo_unref(kgem, bo->proxy);
-
 
3492
		kgem_bo_binding_free(kgem, bo);
-
 
3493
		free(bo);
-
 
3494
		return;
-
 
3495
	}
-
 
3496
 
-
 
3497
	__kgem_bo_destroy(kgem, bo);
-
 
3498
}
 
3535
uint32_t kgem_add_reloc(struct kgem *kgem,
-
 
3536
			uint32_t pos,
-
 
3537
			struct kgem_bo *bo,
-
 
3538
			uint32_t read_write_domain,
-
 
3539
			uint32_t delta)
-
 
3540
{
-
 
3541
	int index;
-
 
3542
 
-
 
3543
	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
-
 
3544
	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));
-
 
3545
 
-
 
3546
	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);
-
 
3547
 
-
 
3548
	index = kgem->nreloc++;
-
 
3549
	assert(index < ARRAY_SIZE(kgem->reloc));
-
 
3550
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
-
 
3551
	if (bo) {
-
 
3552
		assert(bo->refcnt);
-
 
3553
		assert(!bo->purged);
-
 
3554
 
-
 
3555
		while (bo->proxy) {
-
 
3556
			DBG(("%s: adding proxy [delta=%d] for handle=%d\n",
-
 
3557
			     __FUNCTION__, bo->delta, bo->handle));
-
 
3558
			delta += bo->delta;
-
 
3559
			assert(bo->handle == bo->proxy->handle);
-
 
3560
			/* need to release the cache upon batch submit */
-
 
3561
			if (bo->exec == NULL) {
-
 
3562
				list_move_tail(&bo->request,
-
 
3563
					       &kgem->next_request->buffers);
-
 
3564
				bo->rq = MAKE_REQUEST(kgem->next_request,
-
 
3565
						      kgem->ring);
-
 
3566
				bo->exec = &_kgem_dummy_exec;
-
 
3567
			}
-
 
3568
 
-
 
3569
			if (read_write_domain & 0x7fff && !bo->dirty)
-
 
3570
				__kgem_bo_mark_dirty(bo);
-
 
3571
 
-
 
3572
			bo = bo->proxy;
-
 
3573
			assert(bo->refcnt);
-
 
3574
			assert(!bo->purged);
-
 
3575
		}
-
 
3576
 
-
 
3577
		if (bo->exec == NULL)
-
 
3578
			kgem_add_bo(kgem, bo);
-
 
3579
		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
-
 
3580
		assert(RQ_RING(bo->rq) == kgem->ring);
-
 
3581
 
-
 
3582
		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
-
 
3583
			if (bo->tiling &&
-
 
3584
			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
-
 
3585
				assert(kgem->nfence < kgem->fence_max);
-
 
3586
				kgem->aperture_fenced +=
-
 
3587
					kgem_bo_fenced_size(kgem, bo);
-
 
3588
				kgem->nfence++;
-
 
3589
			}
-
 
3590
			bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
-
 
3591
		}
-
 
3592
 
-
 
3593
		kgem->reloc[index].delta = delta;
-
 
3594
		kgem->reloc[index].target_handle = bo->target_handle;
-
 
3595
		kgem->reloc[index].presumed_offset = bo->presumed_offset;
-
 
3596
 
-
 
3597
		if (read_write_domain & 0x7fff && !bo->dirty) {
-
 
3598
			assert(!bo->snoop || kgem->can_blt_cpu);
-
 
3599
			__kgem_bo_mark_dirty(bo);
-
 
3600
		}
-
 
3601
 
-
 
3602
		delta += bo->presumed_offset;
-
 
3603
	} else {
-
 
3604
		kgem->reloc[index].delta = delta;
-
 
3605
		kgem->reloc[index].target_handle = ~0U;
-
 
3606
		kgem->reloc[index].presumed_offset = 0;
-
 
3607
		if (kgem->nreloc__self < 256)
-
 
3608
			kgem->reloc__self[kgem->nreloc__self++] = index;
-
 
3609
	}
-
 
3610
	kgem->reloc[index].read_domains = read_write_domain >> 16;
-
 
3611
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;
-
 
3612
 
-
 
3613
	return delta;
-
 
3614
}
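A hedged sketch of how a relocation is emitted with the function above: read domains occupy the upper 16 bits of read_write_domain, the write domain the lower bits, and the returned value (delta plus the presumed offset) is what gets written into the batch at pos. The opcode is a placeholder, not a real command.

#if 0	/* illustrative sketch; EXAMPLE_GPU_CMD is hypothetical */
	uint32_t *b = kgem->batch + kgem->nbatch;

	b[0] = EXAMPLE_GPU_CMD;
	b[1] = kgem_add_reloc(kgem, kgem->nbatch + 1, bo,
			      I915_GEM_DOMAIN_RENDER << 16 |
			      I915_GEM_DOMAIN_RENDER,	/* read | write */
			      0);
	kgem->nbatch += 2;
#endif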
-
 
3615
 
-
 
3616
static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
-
 
3617
{
-
 
3618
	int i, j;
-
 
3619
 
-
 
3620
	DBG(("%s: type=%d, count=%d (bucket: %d)\n",
-
 
3621
	     __FUNCTION__, type, kgem->vma[type].count, bucket));
-
 
3622
	if (kgem->vma[type].count <= 0)
-
 
3623
	       return;
-
 
3624
 
-
 
3625
	if (kgem->need_purge)
-
 
3626
		kgem_purge_cache(kgem);
-
 
3627
 
-
 
3628
	/* vma are limited on a per-process basis to around 64k.
-
 
3629
	 * This includes all malloc arenas as well as other file
-
 
3630
	 * mappings. In order to be fair and not hog the cache,
-
 
3631
	 * and more importantly not to exhaust that limit and to
-
 
3632
	 * start failing mappings, we keep our own number of open
-
 
3633
	 * vma to within a conservative value.
-
 
3634
	 */
-
 
3635
	i = 0;
-
 
3636
	while (kgem->vma[type].count > 0) {
-
 
3637
		struct kgem_bo *bo = NULL;
-
 
3638
 
-
 
3639
		for (j = 0;
-
 
3640
		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
-
 
3641
		     j++) {
-
 
3642
			struct list *head = &kgem->vma[type].inactive[i++%ARRAY_SIZE(kgem->vma[type].inactive)];
-
 
3643
			if (!list_is_empty(head))
-
 
3644
				bo = list_last_entry(head, struct kgem_bo, vma);
-
 
3645
		}
-
 
3646
		if (bo == NULL)
-
 
3647
			break;
-
 
3648
 
-
 
3649
		DBG(("%s: discarding inactive %s vma cache for %d\n",
-
 
3650
		     __FUNCTION__,
-
 
3651
		     IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle));
-
 
3652
		assert(IS_CPU_MAP(bo->map) == type);
-
 
3653
		assert(bo->map);
-
 
3654
		assert(bo->rq == NULL);
-
 
3655
 
-
 
3656
		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
-
 
3657
//		munmap(MAP(bo->map), bytes(bo));
-
 
3658
		bo->map = NULL;
-
 
3659
		list_del(&bo->vma);
-
 
3660
		kgem->vma[type].count--;
-
 
3661
 
-
 
3662
		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo)) {
-
 
3663
			DBG(("%s: freeing unpurgeable old mapping\n",
-
 
3664
			     __FUNCTION__));
-
 
3665
			kgem_bo_free(kgem, bo);
-
 
3666
		}
-
 
3667
	}
-
 
3668
}
-
 
3669
 
-
 
3670
 
-
 
3671
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
-
 
3672
{
-
 
3673
	void *ptr;
-
 
3674
 
-
 
3675
	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
-
 
3676
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
-
 
3677
 
-
 
3678
	assert(!bo->purged);
-
 
3679
	assert(bo->proxy == NULL);
-
 
3680
	assert(list_is_empty(&bo->list));
-
 
3681
	assert(bo->exec == NULL);
-
 
3682
 
-
 
3683
	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
-
 
3684
	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
-
 
3685
		DBG(("%s: converting request for GTT map into CPU map\n",
-
 
3686
		     __FUNCTION__));
-
 
3687
		ptr = kgem_bo_map__cpu(kgem, bo);
-
 
3688
		kgem_bo_sync__cpu(kgem, bo);
-
 
3689
		return ptr;
-
 
3690
	}
-
 
3691
 
-
 
3692
	if (IS_CPU_MAP(bo->map))
-
 
3693
		kgem_bo_release_map(kgem, bo);
-
 
3694
 
-
 
3695
	ptr = bo->map;
-
 
3696
	if (ptr == NULL) {
-
 
3697
		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
-
 
3698
		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);
-
 
3699
 
-
 
3700
		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
-
 
3701
 
-
 
3702
		ptr = __kgem_bo_map__gtt(kgem, bo);
-
 
3703
		if (ptr == NULL)
-
 
3704
			return NULL;
-
 
3705
 
-
 
3706
		/* Cache this mapping to avoid the overhead of an
-
 
3707
		 * excruciatingly slow GTT pagefault. This is more an
-
 
3708
		 * issue with compositing managers which need to frequently
-
 
3709
		 * flush CPU damage to their GPU bo.
-
 
3710
		 */
-
 
3711
		bo->map = ptr;
-
 
3712
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
-
 
3713
	}
-
 
3714
 
-
 
3715
	if (bo->domain != DOMAIN_GTT) {
-
 
3716
		struct drm_i915_gem_set_domain set_domain;
-
 
3717
 
-
 
3718
		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
-
 
3719
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));
-
 
3720
 
-
 
3721
		/* XXX use PROT_READ to avoid the write flush? */
-
 
3722
 
-
 
3723
		VG_CLEAR(set_domain);
-
 
3724
		set_domain.handle = bo->handle;
-
 
3725
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-
 
3726
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
 
3727
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
-
 
3728
			kgem_bo_retire(kgem, bo);
-
 
3729
			bo->domain = DOMAIN_GTT;
-
 
3730
		}
-
 
3731
	}
-
 
3732
 
-
 
3733
	return ptr;
-
 
3734
}
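A small usage sketch for kgem_bo_map() above, assuming a throwaway linear bo; per the code, untiled buffers on LLC parts (or already CPU-resident ones) get a CPU mapping, everything else a cached GTT mapping moved into the GTT domain.

#if 0	/* illustrative sketch; flags value is an assumption */
	struct kgem_bo *bo = kgem_create_linear(kgem, 4096, 0);
	if (bo) {
		uint32_t *ptr = kgem_bo_map(kgem, bo);
		if (ptr)
			ptr[0] = 0;	/* safe to write: domain already switched */
		kgem_bo_destroy(kgem, bo);
	}
#endif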
-
 
3735
 
-
 
3736
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
-
 
3737
{
-
 
3738
	void *ptr;
-
 
3739
 
-
 
3740
	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
-
 
3741
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
-
 
3742
 
-
 
3743
	assert(!bo->purged);
-
 
3744
	assert(bo->exec == NULL);
-
 
3745
	assert(list_is_empty(&bo->list));
-
 
3746
 
-
 
3747
	if (IS_CPU_MAP(bo->map))
-
 
3748
		kgem_bo_release_map(kgem, bo);
-
 
3749
 
-
 
3750
	ptr = bo->map;
-
 
3751
	if (ptr == NULL) {
-
 
3752
		assert(bytes(bo) <= kgem->aperture_mappable / 4);
-
 
3753
 
-
 
3754
		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
-
 
3755
 
-
 
3756
		ptr = __kgem_bo_map__gtt(kgem, bo);
-
 
3757
		if (ptr == NULL)
-
 
3758
			return NULL;
-
 
3759
 
-
 
3760
		/* Cache this mapping to avoid the overhead of an
-
 
3761
		 * excruciatingly slow GTT pagefault. This is more an
-
 
3762
		 * issue with compositing managers which need to frequently
-
 
3763
		 * flush CPU damage to their GPU bo.
-
 
3764
		 */
-
 
3765
		bo->map = ptr;
-
 
3766
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
-
 
3767
	}
-
 
3768
 
-
 
3769
	return ptr;
-
 
3770
}
-
 
3771
 
-
 
3772
 
-
 
3773
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
-
 
3774
{
-
 
3775
	struct drm_i915_gem_mmap mmap_arg;
-
 
3776
 
-
 
3777
	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
-
 
3778
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
-
 
3779
	assert(!bo->purged);
-
 
3780
	assert(list_is_empty(&bo->list));
-
 
3781
	assert(!bo->scanout);
-
 
3782
	assert(bo->proxy == NULL);
-
 
3783
 
-
 
3784
	if (IS_CPU_MAP(bo->map))
-
 
3785
		return MAP(bo->map);
-
 
3786
 
-
 
3787
	if (bo->map)
-
 
3788
		kgem_bo_release_map(kgem, bo);
-
 
3789
 
-
 
3790
	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));
-
 
3791
 
-
 
3792
retry:
-
 
3793
	VG_CLEAR(mmap_arg);
-
 
3794
	mmap_arg.handle = bo->handle;
-
 
3795
	mmap_arg.offset = 0;
-
 
3796
	mmap_arg.size = bytes(bo);
-
 
3797
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
-
 
3798
		printf("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
-
 
3799
		       __FUNCTION__, bo->handle, bytes(bo), 0);
-
 
3800
		if (__kgem_throttle_retire(kgem, 0))
-
 
3801
			goto retry;
-
 
3802
 
-
 
3803
		if (kgem->need_expire) {
-
 
3804
			kgem_cleanup_cache(kgem);
-
 
3805
			goto retry;
-
 
3806
		}
-
 
3807
 
-
 
3808
		return NULL;
-
 
3809
	}
-
 
3810
 
-
 
3811
	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));
-
 
3812
 
-
 
3813
	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
-
 
3814
	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
-
 
3815
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
-
 
3816
}
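And the CPU-side counterpart, mirroring the call order kgem_bo_map() uses earlier: map first, then move the object to the CPU domain before touching the pages. The bo is assumed to be a plain, non-proxy, non-scanout object.

#if 0	/* illustrative sketch; 'bo' is assumed to satisfy the asserts above */
	void *ptr = kgem_bo_map__cpu(kgem, bo);
	if (ptr) {
		kgem_bo_sync__cpu(kgem, bo);	/* flush GPU writes, enter DOMAIN_CPU */
		memset(ptr, 0, bytes(bo));
	}
#endif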
-
 
3817
 
-
 
3818
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
-
 
3819
{
-
 
3820
	assert(bo->proxy == NULL);
-
 
3821
	kgem_bo_submit(kgem, bo);
-
 
3822
 
-
 
3823
	if (bo->domain != DOMAIN_CPU) {
-
 
3824
		struct drm_i915_gem_set_domain set_domain;
-
 
3825
 
-
 
3826
		DBG(("%s: SYNC: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
-
 
3827
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));
-
 
3828
 
-
 
3829
		VG_CLEAR(set_domain);
-
 
3830
		set_domain.handle = bo->handle;
-
 
3831
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
-
 
3832
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
-
 
3833
 
-
 
3834
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
Line 1254... Line 3835...
1254
}
3835
			kgem_bo_retire(kgem, bo);
1255
 
3836
			bo->domain = DOMAIN_CPU;
1256
 
3837
		}
1257
 
3838
	}
Line 1268... Line 3849...
1268
 
3849
 
1269
		bo->dirty = false;
3850
		bo->dirty = false;
1270
	}
3851
	}
Line 1271... Line -...
1271
}
-
 
1272
 
-
 
1273
 
3852
}
1274
 
3853
 
1275
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
3854
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
Line 1276... Line 3855...
1276
{
3855
{
Line 1307... Line 3886...
1307
		b->offset = offset;
3886
		b->offset = offset;
1308
		bo->binding.next = b;
3887
		bo->binding.next = b;
1309
	}
3888
	}
1310
}
3889
}
Line 1311... Line -...
1311
 
-
 
1312
uint32_t kgem_add_reloc(struct kgem *kgem,
-
 
1313
			uint32_t pos,
-
 
1314
			struct kgem_bo *bo,
-
 
1315
			uint32_t read_write_domain,
-
 
1316
			uint32_t delta)
-
 
1317
{
-
 
1318
    return 0;
-
 
1319
}
-
 
1320
 
-
 
1321
void kgem_reset(struct kgem *kgem)
-
 
1322
{
-
 
1323
 
-
 
Line 1324... Line -...
1324
};
-
 
1325
 
-
 
1326
void _kgem_submit(struct kgem *kgem)
-
 
Line 1327... Line -...
1327
{
-
 
1328
};
-
 
1329
 
-
 
1330
 
-
 
1331
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
-