Subversion Repositories Kolibri OS


Rev 4403 (old) | Rev 4501 (new)
Line 45... Line 45...
45
#include 
45
#include 
46
#endif
46
#endif
Line 47... Line 47...
47
 
47
 
Line 48... Line -...
48
#include "sna_cpuid.h"
-
 
49
 
48
#include "sna_cpuid.h"
50
 
49
 
Line 51... Line 50...
51
static struct kgem_bo *
50
static struct kgem_bo *
52
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
51
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
Line 58... Line 57...
58
#define DBG_NO_TILING 0
57
#define DBG_NO_TILING 0
59
#define DBG_NO_CACHE 0
58
#define DBG_NO_CACHE 0
60
#define DBG_NO_CACHE_LEVEL 0
59
#define DBG_NO_CACHE_LEVEL 0
61
#define DBG_NO_CPU 0
60
#define DBG_NO_CPU 0
62
#define DBG_NO_CREATE2 1
61
#define DBG_NO_CREATE2 1
63
#define DBG_NO_USERPTR 0
62
#define DBG_NO_USERPTR 1
64
#define DBG_NO_UNSYNCHRONIZED_USERPTR 0
63
#define DBG_NO_UNSYNCHRONIZED_USERPTR 0
65
#define DBG_NO_LLC 0
64
#define DBG_NO_LLC 0
66
#define DBG_NO_SEMAPHORES 0
65
#define DBG_NO_SEMAPHORES 0
67
#define DBG_NO_MADV 1
66
#define DBG_NO_MADV 1
68
#define DBG_NO_UPLOAD_CACHE 0
67
#define DBG_NO_UPLOAD_CACHE 0
Line 70... Line 69...
70
#define DBG_NO_MAP_UPLOAD 0
69
#define DBG_NO_MAP_UPLOAD 0
71
#define DBG_NO_RELAXED_FENCING 0
70
#define DBG_NO_RELAXED_FENCING 0
72
#define DBG_NO_SECURE_BATCHES 0
71
#define DBG_NO_SECURE_BATCHES 0
73
#define DBG_NO_PINNED_BATCHES 0
72
#define DBG_NO_PINNED_BATCHES 0
74
#define DBG_NO_FAST_RELOC 0
73
#define DBG_NO_FAST_RELOC 0
75
#define DBG_NO_HANDLE_LUT 1
74
#define DBG_NO_HANDLE_LUT 0
76
#define DBG_NO_WT 0
75
#define DBG_NO_WT 0
77
#define DBG_DUMP 0
76
#define DBG_DUMP 0
Line 78... Line 77...
78
 
77
 
Line 103... Line 102...
103
 
102
 
104
#define MAX_GTT_VMA_CACHE 512
103
#define MAX_GTT_VMA_CACHE 512
105
#define MAX_CPU_VMA_CACHE INT16_MAX
104
#define MAX_CPU_VMA_CACHE INT16_MAX
Line 106... Line -...
106
#define MAP_PRESERVE_TIME 10
-
 
107
 
105
#define MAP_PRESERVE_TIME 10
108
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
106
 
109
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
-
 
Line 110... Line 107...
110
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
107
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
Line 111... Line 108...
111
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
108
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 1)
112
 
109
 
Line 156... Line 153...
156
	struct kgem_bo base;
153
	struct kgem_bo base;
157
	void *mem;
154
	void *mem;
158
	uint32_t used;
155
	uint32_t used;
159
	uint32_t need_io : 1;
156
	uint32_t need_io : 1;
160
	uint32_t write : 2;
157
	uint32_t write : 2;
161
	uint32_t mmapped : 1;
158
	uint32_t mmapped : 2;
-
 
159
};
-
 
160
enum {
-
 
161
	MMAPPED_NONE,
-
 
162
	MMAPPED_GTT,
-
 
163
	MMAPPED_CPU
162
};
164
};
Line 163... Line 165...
163
 
165
 
164
static struct kgem_bo *__kgem_freed_bo;
166
static struct kgem_bo *__kgem_freed_bo;
165
static struct kgem_request *__kgem_freed_request;
167
static struct kgem_request *__kgem_freed_request;
Line 250... Line 252...
250
	arg.handle = handle;
252
	arg.handle = handle;
251
	arg.caching = caching;
253
	arg.caching = caching;
252
	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHING, &arg) == 0;
254
	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHING, &arg) == 0;
253
}
255
}
Line -... Line 256...
-
 
256
 
254
 
257
static uint32_t gem_userptr(int fd, void *ptr, int size, int read_only)
255
 
258
{
256
 
259
    return 0;
Line 257... Line 260...
257
 
260
}
258
 
261
 
259
static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
262
static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
260
{
263
{
Line 287... Line 290...
287
 
290
 
288
	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
291
	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
289
	     bo->handle, bytes(bo)));
292
	     bo->handle, bytes(bo)));
290
	assert(bo->proxy == NULL);
293
	assert(bo->proxy == NULL);
291
	assert(!bo->snoop);
294
	assert(!bo->snoop);
Line 292... Line 295...
292
	assert(kgem_bo_can_map(kgem, bo));
295
	assert(num_pages(bo) <= kgem->aperture_mappable / 4);
293
 
296
 
294
retry_gtt:
297
retry_gtt:
295
	VG_CLEAR(mmap_arg);
298
	VG_CLEAR(mmap_arg);
-
 
299
	mmap_arg.handle = bo->handle;
Line 296... Line 300...
296
	mmap_arg.handle = bo->handle;
300
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
297
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
301
		int err = 0;
298
 
302
 
Line 299... Line -...
299
		(void)__kgem_throttle_retire(kgem, 0);
-
 
300
		if (kgem_expire_cache(kgem))
303
		(void)__kgem_throttle_retire(kgem, 0);
301
			goto retry_gtt;
304
		if (kgem_expire_cache(kgem))
302
 
-
 
Line 303... Line 305...
303
		if (kgem->need_expire) {
305
			goto retry_gtt;
304
			kgem_cleanup_cache(kgem);
306
 
305
			goto retry_gtt;
307
		if (kgem_cleanup_cache(kgem))
306
		}
308
			goto retry_gtt;
Line 307... Line 309...
307
 
309
 
308
		printf("%s: failed to retrieve GTT offset for handle=%d\n",
310
		ErrorF("%s: failed to retrieve GTT offset for handle=%d: %d\n",
Line 319... Line 321...
319
	}
321
	}
Line 320... Line 322...
320
 
322
 
321
	return ptr;
323
	return ptr;
Line 322... Line 324...
322
}
324
}
323
 
325
 
324
static int __gem_write(int fd, uint32_t handle,
326
static int gem_write(int fd, uint32_t handle,
325
		       int offset, int length,
327
		       int offset, int length,
326
		       const void *src)
328
		       const void *src)
Line 336... Line 338...
336
	pwrite.size = length;
338
	pwrite.size = length;
337
	pwrite.data_ptr = (uintptr_t)src;
339
	pwrite.data_ptr = (uintptr_t)src;
338
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
340
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
339
}
341
}
Line 340... Line 342...
340
 
342
 
341
static int gem_write(int fd, uint32_t handle,
343
static int gem_write__cachealigned(int fd, uint32_t handle,
342
		     int offset, int length,
344
		     int offset, int length,
343
		     const void *src)
345
		     const void *src)
344
{
346
{
Line 629... Line 631...
629
}
631
}
Line 630... Line 632...
630
 
632
 
631
static unsigned
633
static unsigned
632
cpu_cache_size__cpuid4(void)
634
cpu_cache_size__cpuid4(void)
633
{
635
{
634
	/* Deterministic Cache Parmaeters (Function 04h)":
636
	/* Deterministic Cache Parameters (Function 04h)":
635
	 *    When EAX is initialized to a value of 4, the CPUID instruction
637
	 *    When EAX is initialized to a value of 4, the CPUID instruction
636
	 *    returns deterministic cache information in the EAX, EBX, ECX
638
	 *    returns deterministic cache information in the EAX, EBX, ECX
637
	 *    and EDX registers.  This function requires ECX be initialized
639
	 *    and EDX registers.  This function requires ECX be initialized
638
	 *    with an index which indicates which cache to return information
640
	 *    with an index which indicates which cache to return information
Line 753... Line 755...
753
	/* Although pre-855gm the GMCH is fubar, it works mostly. So
755
	/* Although pre-855gm the GMCH is fubar, it works mostly. So
754
	 * let the user decide through "NoAccel" whether or not to risk
756
	 * let the user decide through "NoAccel" whether or not to risk
755
	 * hw acceleration.
757
	 * hw acceleration.
756
	 */
758
	 */
Line 757... Line 759...
757
 
759
 
758
	if (kgem->gen == 060 && dev->revision < 8) {
760
	if (kgem->gen == 060 && dev && dev->revision < 8) {
759
		/* pre-production SNB with dysfunctional BLT */
761
		/* pre-production SNB with dysfunctional BLT */
760
		return false;
762
		return false;
Line 761... Line 763...
761
	}
763
	}
Line 879... Line 881...
879
}
881
}
Line 880... Line 882...
880
 
882
 
881
 
883
 
882
static bool kgem_init_pinned_batches(struct kgem *kgem)
884
static bool kgem_init_pinned_batches(struct kgem *kgem)
883
{
885
{
884
	int count[2] = { 2, 2 };
886
	int count[2] = { 4, 4 };
Line 885... Line 887...
885
	int size[2] = { 1, 2 };
887
	int size[2] = { 1, 2 };
886
	int n, i;
888
	int n, i;
Line 909... Line 911...
909
			}
911
			}
Line 910... Line 912...
910
 
912
 
911
			pin.alignment = 0;
913
			pin.alignment = 0;
912
			if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
914
			if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
-
 
915
				gem_close(kgem->fd, pin.handle);
913
				gem_close(kgem->fd, pin.handle);
916
				free(bo);
914
				goto err;
917
				goto err;
915
			}
918
			}
916
			bo->presumed_offset = pin.offset;
919
			bo->presumed_offset = pin.offset;
917
			debug_alloc__bo(kgem, bo);
920
			debug_alloc__bo(kgem, bo);
Line 1026... Line 1029...
1026
    kgem->has_no_reloc = test_has_no_reloc(kgem);
1029
    kgem->has_no_reloc = test_has_no_reloc(kgem);
1027
    DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
1030
    DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
1028
         kgem->has_no_reloc));
1031
         kgem->has_no_reloc));
Line 1029... Line 1032...
1029
 
1032
 
1030
    kgem->has_handle_lut = test_has_handle_lut(kgem);
-
 
1031
    kgem->has_handle_lut = 0;
1033
    kgem->has_handle_lut = test_has_handle_lut(kgem);
1032
    DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
1034
    DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
Line 1033... Line 1035...
1033
         kgem->has_handle_lut));
1035
         kgem->has_handle_lut));
1034
 
1036
 
Line 1040... Line 1042...
1040
 
1042
 
1041
    kgem->can_blt_cpu = gen >= 030;
1043
    kgem->can_blt_cpu = gen >= 030;
1042
    DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
1044
    DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
Line -... Line 1045...
-
 
1045
         kgem->can_blt_cpu));
-
 
1046
 
-
 
1047
	kgem->can_render_y = gen != 021 && (gen >> 3) != 4;
-
 
1048
	DBG(("%s: can render to Y-tiled surfaces? %d\n", __FUNCTION__,
1043
         kgem->can_blt_cpu));
1049
	     kgem->can_render_y));
1044
 
1050
 
1045
    kgem->has_secure_batches = test_has_secure_batches(kgem);
1051
    kgem->has_secure_batches = test_has_secure_batches(kgem);
Line 1046... Line 1052...
1046
    DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
1052
    DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
Line 1113... Line 1119...
1113
    }
1119
    }
1114
    DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
1120
    DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
1115
         kgem->aperture_low, kgem->aperture_low / (1024*1024),
1121
         kgem->aperture_low, kgem->aperture_low / (1024*1024),
1116
         kgem->aperture_high, kgem->aperture_high / (1024*1024)));
1122
         kgem->aperture_high, kgem->aperture_high / (1024*1024)));
Line -... Line 1123...
-
 
1123
 
-
 
1124
	kgem->aperture_mappable = 256 * 1024 * 1024;
1117
 
1125
	if (dev != NULL)
1118
    kgem->aperture_mappable = agp_aperture_size(dev, gen);
1126
    kgem->aperture_mappable = agp_aperture_size(dev, gen);
1119
    if (kgem->aperture_mappable == 0 ||
1127
    if (kgem->aperture_mappable == 0 ||
1120
        kgem->aperture_mappable > aperture.aper_size)
1128
        kgem->aperture_mappable > aperture.aper_size)
1121
        kgem->aperture_mappable = aperture.aper_size;
1129
        kgem->aperture_mappable = aperture.aper_size;
Line 1147... Line 1155...
1147
    if (kgem->max_object_size > totalram / 2)
1155
    if (kgem->max_object_size > totalram / 2)
1148
        kgem->max_object_size = totalram / 2;
1156
        kgem->max_object_size = totalram / 2;
1149
    if (kgem->max_gpu_size > totalram / 4)
1157
    if (kgem->max_gpu_size > totalram / 4)
1150
        kgem->max_gpu_size = totalram / 4;
1158
        kgem->max_gpu_size = totalram / 4;
Line -... Line 1159...
-
 
1159
 
-
 
1160
	if (kgem->aperture_high > totalram / 2) {
-
 
1161
		kgem->aperture_high = totalram / 2;
-
 
1162
		kgem->aperture_low = kgem->aperture_high / 4;
-
 
1163
		DBG(("%s: reduced aperture watermaks to fit into ram; low=%d [%d], high=%d [%d]\n", __FUNCTION__,
-
 
1164
		     kgem->aperture_low, kgem->aperture_low / (1024*1024),
-
 
1165
		     kgem->aperture_high, kgem->aperture_high / (1024*1024)));
-
 
1166
	}
1151
 
1167
 
Line 1152... Line 1168...
1152
    kgem->max_cpu_size = kgem->max_object_size;
1168
    kgem->max_cpu_size = kgem->max_object_size;
1153
 
1169
 
1154
    half_gpu_max = kgem->max_gpu_size / 2;
1170
    half_gpu_max = kgem->max_gpu_size / 2;
Line 1195... Line 1211...
1195
         __FUNCTION__,
1211
         __FUNCTION__,
1196
         kgem->max_gpu_size, kgem->max_cpu_size,
1212
         kgem->max_gpu_size, kgem->max_cpu_size,
1197
         kgem->max_upload_tile_size, kgem->max_copy_tile_size));
1213
         kgem->max_upload_tile_size, kgem->max_copy_tile_size));
Line 1198... Line 1214...
1198
 
1214
 
-
 
1215
    /* Convert the aperture thresholds to pages */
1199
    /* Convert the aperture thresholds to pages */
1216
	kgem->aperture_mappable /= PAGE_SIZE;
1200
    kgem->aperture_low /= PAGE_SIZE;
1217
    kgem->aperture_low /= PAGE_SIZE;
-
 
1218
    kgem->aperture_high /= PAGE_SIZE;
Line 1201... Line 1219...
1201
    kgem->aperture_high /= PAGE_SIZE;
1219
	kgem->aperture_total /= PAGE_SIZE;
1202
 
1220
 
1203
    kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
1221
    kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
1204
    if ((int)kgem->fence_max < 0)
1222
    if ((int)kgem->fence_max < 0)
Line 1231... Line 1249...
1231
	if (flags & CREATE_SCANOUT)
1249
	if (flags & CREATE_SCANOUT)
1232
		return 64;
1250
		return 64;
1233
	return kgem->min_alignment;
1251
	return kgem->min_alignment;
1234
}
1252
}
Line 1235... Line 1253...
1235
 
1253
 
1236
void kgem_get_tile_size(struct kgem *kgem, int tiling,
1254
void kgem_get_tile_size(struct kgem *kgem, int tiling, int pitch,
1237
			int *tile_width, int *tile_height, int *tile_size)
1255
			int *tile_width, int *tile_height, int *tile_size)
1238
{
1256
{
1239
	if (kgem->gen <= 030) {
1257
	if (kgem->gen <= 030) {
1240
		if (tiling) {
1258
		if (tiling) {
Line 1268... Line 1286...
1268
		*tile_width = 128;
1286
		*tile_width = 128;
1269
		*tile_height = 32;
1287
		*tile_height = 32;
1270
		*tile_size = 4096;
1288
		*tile_size = 4096;
1271
		break;
1289
		break;
1272
	}
1290
	}
-
 
1291
 
-
 
1292
	/* Force offset alignment to tile-row */
-
 
1293
	if (tiling && kgem->gen < 033)
-
 
1294
		*tile_width = pitch;
1273
}
1295
}
Line 1274... Line 1296...
1274
 
1296
 
1275
uint32_t kgem_surface_size(struct kgem *kgem,
1297
uint32_t kgem_surface_size(struct kgem *kgem,
1276
				  bool relaxed_fencing,
1298
				  bool relaxed_fencing,
Line 1398... Line 1420...
1398
	return exec;
1420
	return exec;
1399
}
1421
}
Line 1400... Line 1422...
1400
 
1422
 
1401
static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
1423
static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
-
 
1424
{
-
 
1425
	assert(bo->refcnt);
-
 
1426
	assert(bo->proxy == NULL);
1402
{
1427
 
1403
	bo->exec = kgem_add_handle(kgem, bo);
1428
	bo->exec = kgem_add_handle(kgem, bo);
Line 1404... Line 1429...
1404
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);
1429
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);
-
 
1430
 
-
 
1431
	list_move_tail(&bo->request, &kgem->next_request->buffers);
Line 1405... Line 1432...
1405
 
1432
	if (bo->io && !list_is_empty(&bo->list))
1406
	list_move_tail(&bo->request, &kgem->next_request->buffers);
1433
		list_move(&bo->list, &kgem->batch_buffers);
1407
 
1434
 
Line 1459... Line 1486...
1459
		free (b);
1486
		free(b);
1460
		b = next;
1487
		b = next;
1461
	}
1488
	}
1462
}
1489
}
Line 1463... Line -...
1463
 
-
 
1464
static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
-
 
1465
{
-
 
1466
	int type = IS_CPU_MAP(bo->map);
-
 
1467
 
-
 
1468
	assert(!IS_USER_MAP(bo->map));
-
 
1469
 
-
 
1470
	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
-
 
1471
	     __FUNCTION__, type ? "CPU" : "GTT",
-
 
1472
	     bo->handle, kgem->vma[type].count));
-
 
1473
 
-
 
1474
	VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
-
 
1475
	user_free(MAP(bo->map));
-
 
1476
	bo->map = NULL;
-
 
1477
 
-
 
1478
	if (!list_is_empty(&bo->vma)) {
-
 
1479
		list_del(&bo->vma);
-
 
1480
		kgem->vma[type].count--;
-
 
1481
	}
-
 
1482
}
-
 
1483
 
1490
 
1484
static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
1491
static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
1485
{
1492
{
1486
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
1493
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
1487
	assert(bo->refcnt == 0);
1494
	assert(bo->refcnt == 0);
Line 1494... Line 1501...
1494
	kgem->debug_memory.bo_bytes -= bytes(bo);
1501
	kgem->debug_memory.bo_bytes -= bytes(bo);
1495
#endif
1502
#endif
Line 1496... Line 1503...
1496
 
1503
 
Line 1497... Line 1504...
1497
	kgem_bo_binding_free(kgem, bo);
1504
	kgem_bo_binding_free(kgem, bo);
1498
 
1505
 
1499
	if (IS_USER_MAP(bo->map)) {
1506
	if (IS_USER_MAP(bo->map__cpu)) {
1500
		assert(bo->rq == NULL);
1507
		assert(bo->rq == NULL);
1501
		assert(!__kgem_busy(kgem, bo->handle));
1508
		assert(!__kgem_busy(kgem, bo->handle));
1502
		assert(MAP(bo->map) != bo || bo->io || bo->flush);
1509
		assert(MAP(bo->map__cpu) != bo || bo->io || bo->flush);
1503
		if (!(bo->io || bo->flush)) {
1510
		if (!(bo->io || bo->flush)) {
1504
			DBG(("%s: freeing snooped base\n", __FUNCTION__));
1511
			DBG(("%s: freeing snooped base\n", __FUNCTION__));
1505
			assert(bo != MAP(bo->map));
1512
			assert(bo != MAP(bo->map__cpu));
1506
			free(MAP(bo->map));
1513
			free(MAP(bo->map__cpu));
1507
		}
1514
		}
-
 
1515
		bo->map__cpu = NULL;
-
 
1516
	}
-
 
1517
 
-
 
1518
	DBG(("%s: releasing %p:%p vma for handle=%d, count=%d\n",
-
 
1519
	     __FUNCTION__, bo->map__gtt, bo->map__cpu,
-
 
1520
	     bo->handle, list_is_empty(&bo->vma) ? 0 : kgem->vma[bo->map__gtt == NULL].count));
1508
		bo->map = NULL;
1521
 
1509
	}
1522
	if (!list_is_empty(&bo->vma)) {
-
 
1523
		_list_del(&bo->vma);
-
 
1524
		kgem->vma[bo->map__gtt == NULL].count--;
-
 
1525
	}
1510
	if (bo->map)
1526
 
1511
		kgem_bo_release_map(kgem, bo);
1527
//   if (bo->map__gtt)
-
 
1528
//       munmap(MAP(bo->map__gtt), bytes(bo));
Line 1512... Line 1529...
1512
	assert(list_is_empty(&bo->vma));
1529
//   if (bo->map__cpu)
1513
	assert(bo->map == NULL);
1530
//       munmap(MAP(bo->map__cpu), bytes(bo));
1514
 
1531
 
Line 1544... Line 1561...
1544
	ASSERT_IDLE(kgem, bo->handle);
1561
	ASSERT_IDLE(kgem, bo->handle);
Line 1545... Line 1562...
1545
 
1562
 
Line 1546... Line 1563...
1546
	kgem->need_expire = true;
1563
	kgem->need_expire = true;
-
 
1564
 
1547
 
1565
	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
1548
	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
1566
		if (bo->map__gtt) {
1549
		list_move(&bo->list, &kgem->large_inactive);
1567
//           munmap(MAP(bo->map__gtt), bytes(bo));
Line -... Line 1568...
-
 
1568
			bo->map__gtt = NULL;
-
 
1569
	}
1550
		return;
1570
 
1551
	}
1571
		list_move(&bo->list, &kgem->large_inactive);
1552
 
1572
	} else {
1553
	assert(bo->flush == false);
-
 
1554
	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
-
 
1555
	if (bo->map) {
1573
	assert(bo->flush == false);
1556
		int type = IS_CPU_MAP(bo->map);
1574
	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
1557
		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
1575
		if (bo->map__gtt) {
1558
		    (!type && !__kgem_bo_is_mappable(kgem, bo))) {
1576
			if (!kgem_bo_can_map(kgem, bo)) {
1559
//			munmap(MAP(bo->map), bytes(bo));
1577
//				munmap(MAP(bo->map__gtt), bytes(bo));
-
 
1578
				bo->map__gtt = NULL;
-
 
1579
			}
-
 
1580
			if (bo->map__gtt) {
-
 
1581
				list_add(&bo->vma, &kgem->vma[0].inactive[bucket(bo)]);
-
 
1582
				kgem->vma[0].count++;
1560
			bo->map = NULL;
1583
			}
1561
		}
1584
		}
1562
		if (bo->map) {
1585
		if (bo->map__cpu && !bo->map__gtt) {
1563
			list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
1586
			list_add(&bo->vma, &kgem->vma[1].inactive[bucket(bo)]);
1564
			kgem->vma[type].count++;
1587
			kgem->vma[1].count++;
Line 1565... Line 1588...
1565
		}
1588
		}
Line 1572... Line 1595...
1572
 
1595
 
1573
	if (!bo->io)
1596
	if (!bo->io)
Line 1574... Line 1597...
1574
		return bo;
1597
		return bo;
-
 
1598
 
-
 
1599
	assert(!bo->snoop);
-
 
1600
	if (__kgem_freed_bo) {
-
 
1601
		base = __kgem_freed_bo;
1575
 
1602
		__kgem_freed_bo = *(struct kgem_bo **)base;
1576
	assert(!bo->snoop);
1603
	} else
1577
	base = malloc(sizeof(*base));
1604
	base = malloc(sizeof(*base));
1578
	if (base) {
1605
	if (base) {
1579
		DBG(("%s: transferring io handle=%d to bo\n",
1606
		DBG(("%s: transferring io handle=%d to bo\n",
Line 1598... Line 1625...
1598
	DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));
1625
	DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));
Line 1599... Line 1626...
1599
 
1626
 
1600
	list_del(&bo->list);
1627
	list_del(&bo->list);
1601
	assert(bo->rq == NULL);
1628
	assert(bo->rq == NULL);
1602
	assert(bo->exec == NULL);
1629
	assert(bo->exec == NULL);
1603
	if (bo->map) {
1630
	if (!list_is_empty(&bo->vma)) {
1604
		assert(!list_is_empty(&bo->vma));
1631
		assert(bo->map__gtt || bo->map__cpu);
1605
		list_del(&bo->vma);
1632
		list_del(&bo->vma);
1606
		kgem->vma[IS_CPU_MAP(bo->map)].count--;
1633
		kgem->vma[bo->map__gtt == NULL].count--;
1607
	}
1634
	}
Line 1608... Line 1635...
1608
}
1635
}
1609
 
1636
 
1610
inline static void kgem_bo_remove_from_active(struct kgem *kgem,
1637
inline static void kgem_bo_remove_from_active(struct kgem *kgem,
1611
					      struct kgem_bo *bo)
1638
					      struct kgem_bo *bo)
Line 1612... Line 1639...
1612
{
1639
{
1613
	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));
1640
	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));
1614
 
1641
 
-
 
1642
	list_del(&bo->list);
1615
	list_del(&bo->list);
1643
	assert(bo->rq != NULL);
-
 
1644
	if (RQ(bo->rq) == (void *)kgem) {
1616
	assert(bo->rq != NULL);
1645
		assert(bo->exec == NULL);
1617
	if (bo->rq == (void *)kgem)
1646
		list_del(&bo->request);
Line 1618... Line 1647...
1618
		list_del(&bo->request);
1647
	}
1619
	assert(list_is_empty(&bo->vma));
1648
	assert(list_is_empty(&bo->vma));
Line 1738... Line 1767...
1738
void kgem_bo_undo(struct kgem *kgem, struct kgem_bo *bo)
1767
void kgem_bo_undo(struct kgem *kgem, struct kgem_bo *bo)
1739
{
1768
{
1740
	if (kgem->nexec != 1 || bo->exec == NULL)
1769
	if (kgem->nexec != 1 || bo->exec == NULL)
1741
		return;
1770
		return;
Line -... Line 1771...
-
 
1771
 
1742
 
1772
	assert(bo);
1743
	DBG(("%s: only handle in batch, discarding last operations for handle=%d\n",
1773
	DBG(("%s: only handle in batch, discarding last operations for handle=%d\n",
Line 1744... Line 1774...
1744
	     __FUNCTION__, bo->handle));
1774
	     __FUNCTION__, bo->handle));
1745
 
1775
 
1746
	assert(bo->exec == &kgem->exec[0]);
1776
	assert(bo->exec == &kgem->exec[0]);
Line 1747... Line 1777...
1747
	assert(kgem->exec[0].handle == bo->handle);
1777
	assert(kgem->exec[0].handle == bo->handle);
1748
	assert(RQ(bo->rq) == kgem->next_request);
1778
	assert(RQ(bo->rq) == kgem->next_request);
1749
 
1779
 
-
 
1780
	bo->refcnt++;
-
 
1781
	kgem_reset(kgem);
-
 
1782
	bo->refcnt--;
-
 
1783
 
1750
	bo->refcnt++;
1784
	assert(kgem->nreloc == 0);
Line 1751... Line 1785...
1751
	kgem_reset(kgem);
1785
	assert(kgem->nexec == 0);
1752
	bo->refcnt--;
1786
	assert(bo->exec == NULL);
1753
}
1787
}
Line 1775... Line 1809...
1775
			__kgem_bo_clear_busy(bo);
1809
			__kgem_bo_clear_busy(bo);
1776
		if (bo->rq == NULL)
1810
		if (bo->rq == NULL)
1777
			kgem_bo_move_to_snoop(kgem, bo);
1811
			kgem_bo_move_to_snoop(kgem, bo);
1778
		return;
1812
		return;
1779
	}
1813
	}
1780
	if (!IS_USER_MAP(bo->map))
1814
	if (!IS_USER_MAP(bo->map__cpu))
1781
		bo->flush = false;
1815
		bo->flush = false;
Line 1782... Line 1816...
1782
 
1816
 
1783
	if (bo->scanout) {
1817
	if (bo->scanout) {
1784
		kgem_bo_move_to_scanout(kgem, bo);
1818
		kgem_bo_move_to_scanout(kgem, bo);
Line 1791... Line 1825...
1791
		DBG(("%s: handle=%d, not reusable\n",
1825
		DBG(("%s: handle=%d, not reusable\n",
1792
		     __FUNCTION__, bo->handle));
1826
		     __FUNCTION__, bo->handle));
1793
		goto destroy;
1827
		goto destroy;
1794
	}
1828
	}
Line 1795... Line -...
1795
 
-
 
1796
	if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU)
-
 
1797
		kgem_bo_release_map(kgem, bo);
-
 
1798
 
1829
 
1799
	assert(list_is_empty(&bo->vma));
1830
	assert(list_is_empty(&bo->vma));
1800
	assert(list_is_empty(&bo->list));
1831
	assert(list_is_empty(&bo->list));
1801
	assert(bo->flush == false);
1832
	assert(bo->flush == false);
1802
	assert(bo->snoop == false);
1833
	assert(bo->snoop == false);
Line 1822... Line 1853...
1822
	}
1853
	}
Line 1823... Line 1854...
1823
 
1854
 
1824
	assert(bo->exec == NULL);
1855
	assert(bo->exec == NULL);
Line 1825... Line 1856...
1825
	assert(list_is_empty(&bo->request));
1856
	assert(list_is_empty(&bo->request));
1826
 
1857
 
1827
	if (!IS_CPU_MAP(bo->map)) {
1858
	if (bo->map__cpu == NULL || bucket(bo) >= NUM_CACHE_BUCKETS) {
Line 1828... Line 1859...
1828
		if (!kgem_bo_set_purgeable(kgem, bo))
1859
		if (!kgem_bo_set_purgeable(kgem, bo))
1829
			goto destroy;
1860
			goto destroy;
Line 1850... Line 1881...
1850
		__kgem_bo_destroy(kgem, bo);
1881
		__kgem_bo_destroy(kgem, bo);
1851
}
1882
}
Line 1852... Line 1883...
1852
 
1883
 
1853
static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
1884
static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
-
 
1885
{
1854
{
1886
	assert(bo->base.io);
1855
	while (!list_is_empty(&bo->base.vma)) {
1887
	while (!list_is_empty(&bo->base.vma)) {
Line 1856... Line 1888...
1856
		struct kgem_bo *cached;
1888
		struct kgem_bo *cached;
1857
 
1889
 
-
 
1890
		cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
1858
		cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
1891
		assert(cached->proxy == &bo->base);
Line 1859... Line 1892...
1859
		assert(cached->proxy == &bo->base);
1892
		assert(cached != &bo->base);
1860
		list_del(&cached->vma);
1893
		list_del(&cached->vma);
1861
 
1894
 
Line 1862... Line 1895...
1862
		assert(*(struct kgem_bo **)cached->map == cached);
1895
		assert(*(struct kgem_bo **)cached->map__gtt == cached);
1863
		*(struct kgem_bo **)cached->map = NULL;
1896
		*(struct kgem_bo **)cached->map__gtt = NULL;
1864
		cached->map = NULL;
1897
		cached->map__gtt = NULL;
Line 1875... Line 1908...
1875
		struct kgem_buffer *bo =
1908
		struct kgem_buffer *bo =
1876
			list_last_entry(&kgem->active_buffers,
1909
			list_last_entry(&kgem->active_buffers,
1877
					struct kgem_buffer,
1910
					struct kgem_buffer,
1878
					base.list);
1911
					base.list);
Line -... Line 1912...
-
 
1912
 
-
 
1913
		DBG(("%s: handle=%d, busy? %d [%d]\n",
-
 
1914
		     __FUNCTION__, bo->base.handle, bo->base.rq != NULL, bo->base.exec != NULL));
-
 
1915
 
1879
 
1916
		assert(bo->base.exec == NULL || RQ(bo->base.rq) == kgem->next_request);
1880
		if (bo->base.rq)
1917
		if (bo->base.rq)
Line 1881... Line 1918...
1881
			break;
1918
			break;
1882
 
1919
 
Line 1895... Line 1932...
1895
{
1932
{
1896
	struct kgem_bo *bo, *next;
1933
	struct kgem_bo *bo, *next;
1897
	bool retired = false;
1934
	bool retired = false;
Line 1898... Line 1935...
1898
 
1935
 
1899
	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
1936
	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
1900
		assert(bo->rq == (void *)kgem);
1937
		assert(RQ(bo->rq) == (void *)kgem);
Line 1901... Line 1938...
1901
		assert(bo->exec == NULL);
1938
		assert(bo->exec == NULL);
1902
 
1939
 
Line 1958... Line 1995...
1958
			bo->needs_flush = __kgem_busy(kgem, bo->handle);
1995
			bo->needs_flush = __kgem_busy(kgem, bo->handle);
1959
		if (bo->needs_flush) {
1996
		if (bo->needs_flush) {
1960
			DBG(("%s: moving %d to flushing\n",
1997
			DBG(("%s: moving %d to flushing\n",
1961
			     __FUNCTION__, bo->handle));
1998
			     __FUNCTION__, bo->handle));
1962
			list_add(&bo->request, &kgem->flushing);
1999
			list_add(&bo->request, &kgem->flushing);
-
 
2000
			bo->rq = MAKE_REQUEST(kgem, RQ_RING(bo->rq));
1963
			bo->rq = (void *)kgem;
2001
			kgem->need_retire = true;
1964
			continue;
2002
			continue;
1965
		}
2003
		}
Line 1966... Line 2004...
1966
 
2004
 
1967
		bo->domain = DOMAIN_NONE;
2005
		bo->domain = DOMAIN_NONE;
Line 1983... Line 2021...
1983
			kgem_bo_free(kgem, bo);
2021
			kgem_bo_free(kgem, bo);
1984
		}
2022
		}
1985
	}
2023
	}
Line 1986... Line 2024...
1986
 
2024
 
-
 
2025
	assert(rq->bo->rq == NULL);
1987
	assert(rq->bo->rq == NULL);
2026
	assert(rq->bo->exec == NULL);
Line 1988... Line 2027...
1988
	assert(list_is_empty(&rq->bo->request));
2027
	assert(list_is_empty(&rq->bo->request));
1989
 
2028
 
1990
	if (--rq->bo->refcnt == 0) {
2029
	if (--rq->bo->refcnt == 0) {
Line 2055... Line 2094...
2055
 
2094
 
2056
bool kgem_retire(struct kgem *kgem)
2095
bool kgem_retire(struct kgem *kgem)
2057
{
2096
{
Line 2058... Line 2097...
2058
	bool retired = false;
2097
	bool retired = false;
Line 2059... Line 2098...
2059
 
2098
 
Line 2060... Line 2099...
2060
	DBG(("%s\n", __FUNCTION__));
2099
	DBG(("%s, need_retire?=%d\n", __FUNCTION__, kgem->need_retire));
2061
 
2100
 
Line 2075... Line 2114...
2075
 
2114
 
2076
bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
2115
bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
2077
{
2116
{
Line -... Line 2117...
-
 
2117
	struct kgem_request *rq;
2078
	struct kgem_request *rq;
2118
 
Line 2079... Line 2119...
2079
 
2119
	assert(ring < ARRAY_SIZE(kgem->requests));
2080
	assert(!list_is_empty(&kgem->requests[ring]));
2120
	assert(!list_is_empty(&kgem->requests[ring]));
2081
 
2121
 
Line 2089... Line 2129...
2089
 
2129
 
2090
	DBG(("%s: ring=%d idle (handle=%d)\n",
2130
	DBG(("%s: ring=%d idle (handle=%d)\n",
Line 2091... Line 2131...
2091
	     __FUNCTION__, ring, rq->bo->handle));
2131
	     __FUNCTION__, ring, rq->bo->handle));
-
 
2132
 
-
 
2133
	kgem_retire__requests_ring(kgem, ring);
2092
 
2134
	kgem_retire__buffers(kgem);
2093
	kgem_retire__requests_ring(kgem, ring);
2135
 
2094
	assert(list_is_empty(&kgem->requests[ring]));
2136
	assert(list_is_empty(&kgem->requests[ring]));
Line -... Line 2137...
-
 
2137
	return true;
-
 
2138
}
-
 
2139
 
-
 
2140
#ifndef NDEBUG
-
 
2141
static void kgem_commit__check_buffers(struct kgem *kgem)
-
 
2142
{
-
 
2143
	struct kgem_buffer *bo;
-
 
2144
 
-
 
2145
	list_for_each_entry(bo, &kgem->active_buffers, base.list)
-
 
2146
		assert(bo->base.exec == NULL);
-
 
2147
}
-
 
2148
#else
2095
	return true;
2149
#define kgem_commit__check_buffers(kgem)
2096
}
2150
#endif
2097
 
2151
 
2098
static void kgem_commit(struct kgem *kgem)
2152
static void kgem_commit(struct kgem *kgem)
Line 2116... Line 2170...
2116
		bo->exec = NULL;
2170
		bo->exec = NULL;
2117
		bo->target_handle = -1;
2171
		bo->target_handle = -1;
Line 2118... Line 2172...
2118
 
2172
 
2119
		if (!bo->refcnt && !bo->reusable) {
2173
		if (!bo->refcnt && !bo->reusable) {
-
 
2174
			assert(!bo->snoop);
2120
			assert(!bo->snoop);
2175
			assert(!bo->proxy);
2121
			kgem_bo_free(kgem, bo);
2176
			kgem_bo_free(kgem, bo);
2122
			continue;
2177
			continue;
Line 2123... Line 2178...
2123
		}
2178
		}
2124
 
2179
 
2125
		bo->binding.offset = 0;
2180
		bo->binding.offset = 0;
Line 2126... Line 2181...
2126
		bo->domain = DOMAIN_GPU;
2181
		bo->domain = DOMAIN_GPU;
2127
		bo->gpu_dirty = false;
2182
		bo->gpu_dirty = false;
2128
 
-
 
2129
		if (bo->proxy) {
2183
 
2130
			/* proxies are not used for domain tracking */
2184
		if (bo->proxy) {
Line 2131... Line 2185...
2131
			bo->exec = NULL;
2185
			/* proxies are not used for domain tracking */
2132
			__kgem_bo_clear_busy(bo);
2186
			__kgem_bo_clear_busy(bo);
Line 2150... Line 2204...
2150
		}
2204
		}
Line 2151... Line 2205...
2151
 
2205
 
2152
		kgem_retire(kgem);
2206
		kgem_retire(kgem);
Line -... Line 2207...
-
 
2207
		assert(list_is_empty(&rq->buffers));
2153
		assert(list_is_empty(&rq->buffers));
2208
 
2154
 
2209
		assert(rq->bo->map__gtt == NULL);
2155
		assert(rq->bo->map == NULL);
2210
		assert(rq->bo->map__cpu == NULL);
2156
		gem_close(kgem->fd, rq->bo->handle);
2211
		gem_close(kgem->fd, rq->bo->handle);
2157
		kgem_cleanup_cache(kgem);
2212
		kgem_cleanup_cache(kgem);
2158
	} else {
2213
	} else {
2159
		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
2214
		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
Line 2160... Line 2215...
2160
		kgem->need_throttle = kgem->need_retire = 1;
2215
		kgem->need_throttle = kgem->need_retire = 1;
-
 
2216
	}
-
 
2217
 
2161
	}
2218
	kgem->next_request = NULL;
Line 2162... Line 2219...
2162
 
2219
 
2163
	kgem->next_request = NULL;
2220
	kgem_commit__check_buffers(kgem);
2164
}
2221
}
Line 2180... Line 2237...
2180
static void kgem_finish_buffers(struct kgem *kgem)
2237
static void kgem_finish_buffers(struct kgem *kgem)
2181
{
2238
{
2182
	struct kgem_buffer *bo, *next;
2239
	struct kgem_buffer *bo, *next;
Line 2183... Line 2240...
2183
 
2240
 
2184
	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
2241
	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
2185
		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%s\n",
2242
		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%s, refcnt=%d\n",
2186
		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
2243
		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
-
 
2244
		     bo->write, bo->mmapped == MMAPPED_CPU ? "cpu" : bo->mmapped == MMAPPED_GTT ? "gtt" : "no",
Line 2187... Line 2245...
2187
		     bo->write, bo->mmapped ? IS_CPU_MAP(bo->base.map) ? "cpu" : "gtt" : "no"));
2245
		     bo->base.refcnt));
2188
 
2246
 
2189
		assert(next->base.list.prev == &bo->base.list);
2247
		assert(next->base.list.prev == &bo->base.list);
Line 2190... Line 2248...
2190
		assert(bo->base.io);
2248
		assert(bo->base.io);
2191
		assert(bo->base.refcnt >= 1);
2249
		assert(bo->base.refcnt >= 1);
2192
 
2250
 
2193
		if (!bo->base.exec) {
2251
		if (bo->base.refcnt > 1 && !bo->base.exec) {
2194
			DBG(("%s: skipping unattached handle=%d, used=%d\n",
2252
			DBG(("%s: skipping unattached handle=%d, used=%d, refcnt=%d\n",
Line 2195... Line 2253...
2195
			     __FUNCTION__, bo->base.handle, bo->used));
2253
			     __FUNCTION__, bo->base.handle, bo->used, bo->base.refcnt));
2196
			continue;
2254
			continue;
2197
		}
2255
		}
2198
 
2256
 
Line 2199... Line 2257...
2199
		if (!bo->write) {
2257
		if (!bo->write) {
2200
			assert(bo->base.exec || bo->base.refcnt > 1);
2258
			assert(bo->base.exec || bo->base.refcnt > 1);
Line 2201... Line 2259...
2201
			goto decouple;
2259
			goto decouple;
Line 2202... Line 2260...
2202
		}
2260
		}
2203
 
2261
 
2204
		if (bo->mmapped) {
2262
		if (bo->mmapped) {
2205
			int used;
2263
			uint32_t used;
2206
 
2264
 
2207
			assert(!bo->need_io);
2265
			assert(!bo->need_io);
2208
 
2266
 
2209
			used = ALIGN(bo->used, PAGE_SIZE);
2267
			used = ALIGN(bo->used, PAGE_SIZE);
2210
			if (!DBG_NO_UPLOAD_ACTIVE &&
2268
			if (!DBG_NO_UPLOAD_ACTIVE &&
-
 
2269
			    used + PAGE_SIZE <= bytes(&bo->base) &&
2211
			    used + PAGE_SIZE <= bytes(&bo->base) &&
2270
			    (kgem->has_llc || bo->mmapped == MMAPPED_GTT || bo->base.snoop)) {
2212
			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) {
2271
				DBG(("%s: retaining upload buffer (%d/%d): used=%d, refcnt=%d\n",
2213
				DBG(("%s: retaining upload buffer (%d/%d)\n",
2272
				     __FUNCTION__, bo->used, bytes(&bo->base), used, bo->base.refcnt));
2214
				     __FUNCTION__, bo->used, bytes(&bo->base)));
2273
				bo->used = used;
2215
				bo->used = used;
2274
				list_move(&bo->base.list,
2216
				list_move(&bo->base.list,
2275
					  &kgem->active_buffers);
Line 2217... Line 2276...
2217
					  &kgem->active_buffers);
2276
				kgem->need_retire = true;
2218
				continue;
2277
				continue;
2219
			}
2278
			}
2220
			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
2279
			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
2221
			     __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map)));
2280
			     __FUNCTION__, bo->used, bo->mmapped));
2222
			goto decouple;
2281
			goto decouple;
Line 2299... Line 2358...
2299
				     __FUNCTION__,
2358
				     __FUNCTION__,
2300
				     bo->used, bytes(&bo->base), bytes(shrink),
2359
				     bo->used, bytes(&bo->base), bytes(shrink),
2301
				     bo->base.handle, shrink->handle));
2360
				     bo->base.handle, shrink->handle));
Line 2302... Line 2361...
2302
 
2361
 
2303
				assert(bo->used <= bytes(shrink));
2362
				assert(bo->used <= bytes(shrink));
2304
				if (gem_write(kgem->fd, shrink->handle,
2363
				if (gem_write__cachealigned(kgem->fd, shrink->handle,
2305
					      0, bo->used, bo->mem) == 0) {
2364
					      0, bo->used, bo->mem) == 0) {
2306
					shrink->target_handle =
2365
					shrink->target_handle =
2307
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
2366
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
2308
					for (n = 0; n < kgem->nreloc; n++) {
2367
					for (n = 0; n < kgem->nreloc; n++) {
Line 2338... Line 2397...
2338
 
2397
 
2339
		DBG(("%s: handle=%d, uploading %d/%d\n",
2398
		DBG(("%s: handle=%d, uploading %d/%d\n",
2340
		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
2399
		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
2341
		ASSERT_IDLE(kgem, bo->base.handle);
2400
		ASSERT_IDLE(kgem, bo->base.handle);
2342
		assert(bo->used <= bytes(&bo->base));
2401
		assert(bo->used <= bytes(&bo->base));
2343
		gem_write(kgem->fd, bo->base.handle,
2402
		gem_write__cachealigned(kgem->fd, bo->base.handle,
2344
			  0, bo->used, bo->mem);
2403
			  0, bo->used, bo->mem);
Line 2345... Line 2404...
2345
		bo->need_io = 0;
2404
		bo->need_io = 0;
2346
 
2405
 
Line 2388... Line 2447...
2388
{
2447
{
2389
	int ret;
2448
	int ret;
Line 2390... Line 2449...
2390
 
2449
 
Line -... Line 2450...
-
 
2450
	ASSERT_IDLE(kgem, handle);
2391
	ASSERT_IDLE(kgem, handle);
2451
 
2392
 
2452
retry:
2393
	/* If there is no surface data, just upload the batch */
2453
	/* If there is no surface data, just upload the batch */
2394
	if (kgem->surface == kgem->batch_size)
2454
	if (kgem->surface == kgem->batch_size) {
2395
		return gem_write(kgem->fd, handle,
2455
		if (gem_write__cachealigned(kgem->fd, handle,
-
 
2456
				 0, sizeof(uint32_t)*kgem->nbatch,
-
 
2457
					    kgem->batch) == 0)
-
 
2458
			return 0;
-
 
2459
 
Line 2396... Line 2460...
2396
				 0, sizeof(uint32_t)*kgem->nbatch,
2460
		goto expire;
2397
				 kgem->batch);
2461
	}
2398
 
2462
 
2399
	/* Are the batch pages conjoint with the surface pages? */
2463
	/* Are the batch pages conjoint with the surface pages? */
2400
	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
2464
	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
2401
		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
2465
		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
-
 
2466
		if (gem_write__cachealigned(kgem->fd, handle,
-
 
2467
				 0, kgem->batch_size*sizeof(uint32_t),
-
 
2468
					    kgem->batch) == 0)
2402
		return gem_write(kgem->fd, handle,
2469
			return 0;
Line 2403... Line 2470...
2403
				 0, kgem->batch_size*sizeof(uint32_t),
2470
 
2404
				 kgem->batch);
2471
		goto expire;
2405
	}
2472
	}
2406
 
2473
 
2407
	/* Disjoint surface/batch, upload separately */
-
 
2408
	ret = gem_write(kgem->fd, handle,
2474
	/* Disjoint surface/batch, upload separately */
Line 2409... Line 2475...
2409
			0, sizeof(uint32_t)*kgem->nbatch,
2475
	if (gem_write__cachealigned(kgem->fd, handle,
2410
			kgem->batch);
2476
			0, sizeof(uint32_t)*kgem->nbatch,
2411
	if (ret)
2477
				    kgem->batch))
2412
		return ret;
2478
		goto expire;
2413
 
2479
 
2414
	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
2480
	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
-
 
2481
	ret -= sizeof(uint32_t) * kgem->surface;
-
 
2482
	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
-
 
2483
	if (gem_write(kgem->fd, handle,
-
 
2484
			size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
-
 
2485
		      kgem->batch + kgem->surface))
-
 
2486
		goto expire;
-
 
2487
 
-
 
2488
	return 0;
-
 
2489
 
-
 
2490
expire:
-
 
2491
	ret = errno;
-
 
2492
	assert(ret != EINVAL);
-
 
2493
 
-
 
2494
	(void)__kgem_throttle_retire(kgem, 0);
-
 
2495
	if (kgem_expire_cache(kgem))
-
 
2496
		goto retry;
-
 
2497
 
-
 
2498
	if (kgem_cleanup_cache(kgem))
2415
	ret -= sizeof(uint32_t) * kgem->surface;
2499
		goto retry;
Line 2416... Line 2500...
2416
	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
2500
 
2417
	return __gem_write(kgem->fd, handle,
2501
	ErrorF("%s: failed to write batch (handle=%d): %d\n",
2418
			size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
2502
	       __FUNCTION__, handle, ret);
Line 2440... Line 2524...
2440
 
2524
 
2441
			if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
2525
			if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
2442
				assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);
2526
				assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);
2443
				list_add(&bo->request, &kgem->flushing);
2527
				list_add(&bo->request, &kgem->flushing);
-
 
2528
				bo->rq = (void *)kgem;
2444
				bo->rq = (void *)kgem;
2529
				kgem->need_retire = true;
2445
			} else
2530
			} else
Line 2446... Line 2531...
2446
				__kgem_bo_clear_busy(bo);
2531
				__kgem_bo_clear_busy(bo);
2447
 
2532
 
Line 2472... Line 2557...
2472
	kgem->nexec = 0;
2557
	kgem->nexec = 0;
2473
	kgem->nreloc = 0;
2558
	kgem->nreloc = 0;
2474
	kgem->nreloc__self = 0;
2559
	kgem->nreloc__self = 0;
2475
	kgem->aperture = 0;
2560
	kgem->aperture = 0;
2476
	kgem->aperture_fenced = 0;
2561
	kgem->aperture_fenced = 0;
-
 
2562
	kgem->aperture_max_fence = 0;
2477
	kgem->nbatch = 0;
2563
	kgem->nbatch = 0;
2478
	kgem->surface = kgem->batch_size;
2564
	kgem->surface = kgem->batch_size;
2479
	kgem->mode = KGEM_NONE;
2565
	kgem->mode = KGEM_NONE;
2480
	kgem->flush = 0;
2566
	kgem->flush = 0;
2481
	kgem->batch_flags = kgem->batch_flags_base;
2567
	kgem->batch_flags = kgem->batch_flags_base;
Line 2597... Line 2683...
2597
	assert(kgem->nbatch <= kgem->surface);
2683
	assert(kgem->nbatch <= kgem->surface);
Line 2598... Line 2684...
2598
 
2684
 
2599
	batch_end = kgem_end_batch(kgem);
2685
	batch_end = kgem_end_batch(kgem);
Line 2600... Line 2686...
2600
	kgem_sna_flush(kgem);
2686
	kgem_sna_flush(kgem);
2601
 
2687
 
2602
	DBG(("batch[%d/%d, flags=%x]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
2688
	DBG(("batch[%d/%d, flags=%x]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d [fenced=%d]\n",
2603
	     kgem->mode, kgem->ring, kgem->batch_flags,
2689
	     kgem->mode, kgem->ring, kgem->batch_flags,
Line 2604... Line 2690...
2604
	     batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
2690
	     batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
2605
	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));
2691
	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, kgem->aperture_fenced));
2606
 
2692
 
2607
	assert(kgem->nbatch <= kgem->batch_size);
2693
	assert(kgem->nbatch <= kgem->batch_size);
Line 2658... Line 2744...
2658
 
2744
 
2659
   		    if (DEBUG_DUMP)
2745
   		    if (DEBUG_DUMP)
2660
            {
2746
            {
2661
                int fd = open("/tmp1/1/batchbuffer.bin", O_CREAT|O_WRONLY|O_BINARY);
2747
                int fd = open("/tmp1/1/batchbuffer.bin", O_CREAT|O_WRONLY|O_BINARY);
2662
				if (fd != -1) {
2748
				if (fd != -1) {
2663
                    write(fd, kgem->batch, size);
2749
					ret = write(fd, kgem->batch, batch_end*sizeof(uint32_t));
2664
					close(fd);
2750
					fd = close(fd);
2665
				}
2751
				}
2666
                else printf("SNA: failed to write batchbuffer\n");
2752
                else printf("SNA: failed to write batchbuffer\n");
2667
                asm volatile("int3");
2753
                asm volatile("int3");
Line 2692... Line 2778...
2692
				kgem_throttle(kgem);
2778
				kgem_throttle(kgem);
2693
				kgem->wedged = true;
2779
				kgem->wedged = true;
Line 2694... Line 2780...
2694
 
2780
 
2695
#if 0
2781
#if 0
2696
				ret = errno;
2782
				ret = errno;
2697
				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
2783
				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d, fenced=%d, high=%d,%d: errno=%d\n",
2698
				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
2784
				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
Line 2699... Line 2785...
2699
				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);
2785
				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, kgem->aperture_fenced, kgem->aperture_high, kgem->aperture_total, errno);
2700
 
2786
 
Line 2701... Line 2787...
2701
				for (i = 0; i < kgem->nexec; i++) {
2787
				for (i = 0; i < kgem->nexec; i++) {
Line 2762... Line 2848...
2762
		printf("Detected a hung GPU, disabling acceleration.\n");
2848
		printf("Detected a hung GPU, disabling acceleration.\n");
2763
		printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
2849
		printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
2764
	}
2850
	}
2765
}
2851
}
Line 2766... Line 2852...
2766
 
2852
 
2767
void kgem_purge_cache(struct kgem *kgem)
2853
static void kgem_purge_cache(struct kgem *kgem)
2768
{
2854
{
2769
	struct kgem_bo *bo, *next;
2855
	struct kgem_bo *bo, *next;
Line 2770... Line 2856...
2770
	int i;
2856
	int i;
Line 2890... Line 2976...
2890
			if (bo->delta > expire) {
2976
			if (bo->delta > expire) {
2891
				idle = false;
2977
				idle = false;
2892
				break;
2978
				break;
2893
			}
2979
			}
Line 2894... Line 2980...
2894
 
2980
 
2895
			if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) {
2981
			if (bo->map__cpu && bo->delta + MAP_PRESERVE_TIME > expire) {
2896
				idle = false;
2982
				idle = false;
2897
				list_move_tail(&bo->list, &preserve);
2983
				list_move_tail(&bo->list, &preserve);
2898
			} else {
2984
			} else {
2899
				count++;
2985
				count++;
Line 2930... Line 3016...
2930
	return !idle;
3016
	return !idle;
2931
	(void)count;
3017
	(void)count;
2932
	(void)size;
3018
	(void)size;
2933
}
3019
}
Line 2934... Line 3020...
2934
 
3020
 
2935
void kgem_cleanup_cache(struct kgem *kgem)
3021
bool kgem_cleanup_cache(struct kgem *kgem)
2936
{
3022
{
2937
	unsigned int i;
3023
	unsigned int i;
Line 2938... Line 3024...
2938
	int n;
3024
	int n;
Line 2960... Line 3046...
2960
	}
3046
	}
Line 2961... Line 3047...
2961
 
3047
 
2962
	kgem_retire(kgem);
3048
	kgem_retire(kgem);
Line -... Line 3049...
-
 
3049
	kgem_cleanup(kgem);
-
 
3050
 
-
 
3051
	if (!kgem->need_expire)
2963
	kgem_cleanup(kgem);
3052
		return false;
2964
 
3053
 
2965
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
3054
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
2966
		while (!list_is_empty(&kgem->inactive[i]))
3055
		while (!list_is_empty(&kgem->inactive[i]))
2967
			kgem_bo_free(kgem,
3056
			kgem_bo_free(kgem,
Line 2982... Line 3071...
2982
		free(bo);
3071
		free(bo);
2983
	}
3072
	}
Line 2984... Line 3073...
2984
 
3073
 
2985
	kgem->need_purge = false;
3074
	kgem->need_purge = false;
-
 
3075
	kgem->need_expire = false;
2986
	kgem->need_expire = false;
3076
	return true;
Line 2987... Line 3077...
2987
}
3077
}
2988
 
3078
 
2989
static struct kgem_bo *
3079
static struct kgem_bo *
Line 3026... Line 3116...
3026
 
3116
 
3027
			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo))
3117
			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo))
Line 3028... Line 3118...
3028
				goto discard;
3118
				goto discard;
3029
 
3119
 
-
 
3120
			list_del(&bo->list);
3030
			list_del(&bo->list);
3121
			if (RQ(bo->rq) == (void *)kgem) {
-
 
3122
				assert(bo->exec == NULL);
Line 3031... Line 3123...
3031
			if (bo->rq == (void *)kgem)
3123
				list_del(&bo->request);
3032
				list_del(&bo->request);
3124
			}
3033
 
3125
 
Line 3081... Line 3173...
3081
		int for_cpu = !!(flags & CREATE_CPU_MAP);
3173
		int for_cpu = !!(flags & CREATE_CPU_MAP);
3082
		DBG(("%s: searching for inactive %s map\n",
3174
		DBG(("%s: searching for inactive %s map\n",
3083
		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
3175
		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
3084
		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
3176
		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
3085
		list_for_each_entry(bo, cache, vma) {
3177
		list_for_each_entry(bo, cache, vma) {
3086
			assert(IS_CPU_MAP(bo->map) == for_cpu);
3178
			assert(for_cpu ? bo->map__cpu : bo->map__gtt);
3087
			assert(bucket(bo) == cache_bucket(num_pages));
3179
			assert(bucket(bo) == cache_bucket(num_pages));
3088
			assert(bo->proxy == NULL);
3180
			assert(bo->proxy == NULL);
3089
			assert(bo->rq == NULL);
3181
			assert(bo->rq == NULL);
3090
			assert(bo->exec == NULL);
3182
			assert(bo->exec == NULL);
3091
			assert(!bo->scanout);
3183
			assert(!bo->scanout);
Line 3105... Line 3197...
3105
			    !gem_set_tiling(kgem->fd, bo->handle,
3197
			    !gem_set_tiling(kgem->fd, bo->handle,
3106
					    I915_TILING_NONE, 0))
3198
					    I915_TILING_NONE, 0))
3107
				continue;
3199
				continue;
Line 3108... Line 3200...
3108
 
3200
 
-
 
3201
			kgem_bo_remove_from_inactive(kgem, bo);
-
 
3202
			assert(list_is_empty(&bo->vma));
Line 3109... Line 3203...
3109
			kgem_bo_remove_from_inactive(kgem, bo);
3203
			assert(list_is_empty(&bo->list));
3110
 
3204
 
3111
			bo->tiling = I915_TILING_NONE;
3205
			bo->tiling = I915_TILING_NONE;
3112
			bo->pitch = 0;
3206
			bo->pitch = 0;
Line 3161... Line 3255...
3161
 
3255
 
3162
			bo->tiling = I915_TILING_NONE;
3256
			bo->tiling = I915_TILING_NONE;
3163
			bo->pitch = 0;
3257
			bo->pitch = 0;
Line 3164... Line 3258...
3164
		}
3258
		}
3165
 
3259
 
3166
		if (bo->map) {
3260
		if (bo->map__gtt || bo->map__cpu) {
3167
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
3261
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
3168
				int for_cpu = !!(flags & CREATE_CPU_MAP);
3262
				int for_cpu = !!(flags & CREATE_CPU_MAP);
3169
				if (IS_CPU_MAP(bo->map) != for_cpu) {
3263
				if (for_cpu ? bo->map__cpu : bo->map__gtt){
Line 3170... Line 3264...
3170
					if (first != NULL)
3264
					if (first != NULL)
3171
						break;
3265
						break;
Line 3179... Line 3273...
3179
 
3273
 
3180
				first = bo;
3274
				first = bo;
3181
				continue;
3275
				continue;
3182
			}
3276
			}
-
 
3277
		} else {
-
 
3278
			if (flags & CREATE_GTT_MAP && !kgem_bo_can_map(kgem, bo))
-
 
3279
				continue;
3183
		} else {
3280
 
3184
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
3281
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
3185
				if (first != NULL)
3282
				if (first != NULL)
Line 3186... Line 3283...
3186
					break;
3283
					break;
Line 3200... Line 3297...
3200
		bo->delta = 0;
3297
		bo->delta = 0;
3201
		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
3298
		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
3202
		     __FUNCTION__, bo->handle, num_pages(bo),
3299
		     __FUNCTION__, bo->handle, num_pages(bo),
3203
		     use_active ? "active" : "inactive"));
3300
		     use_active ? "active" : "inactive"));
3204
		assert(list_is_empty(&bo->list));
3301
		assert(list_is_empty(&bo->list));
-
 
3302
		assert(list_is_empty(&bo->vma));
3205
		assert(use_active || bo->domain != DOMAIN_GPU);
3303
		assert(use_active || bo->domain != DOMAIN_GPU);
3206
		assert(!bo->needs_flush || use_active);
3304
		assert(!bo->needs_flush || use_active);
3207
		assert_tiling(kgem, bo);
3305
		assert_tiling(kgem, bo);
3208
		ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
3306
		ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
3209
		return bo;
3307
		return bo;
Line 3221... Line 3319...
3221
		first->delta = 0;
3319
		first->delta = 0;
3222
		DBG(("  %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
3320
		DBG(("  %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
3223
		     __FUNCTION__, first->handle, num_pages(first),
3321
		     __FUNCTION__, first->handle, num_pages(first),
3224
		     use_active ? "active" : "inactive"));
3322
		     use_active ? "active" : "inactive"));
3225
		assert(list_is_empty(&first->list));
3323
		assert(list_is_empty(&first->list));
-
 
3324
		assert(list_is_empty(&first->vma));
3226
		assert(use_active || first->domain != DOMAIN_GPU);
3325
		assert(use_active || first->domain != DOMAIN_GPU);
3227
		assert(!first->needs_flush || use_active);
3326
		assert(!first->needs_flush || use_active);
3228
		ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active);
3327
		ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active);
3229
		return first;
3328
		return first;
3230
	}
3329
	}
Line 3280... Line 3379...
3280
	assert(bo->tiling);
3379
	assert(bo->tiling);
3281
	assert_tiling(kgem, bo);
3380
	assert_tiling(kgem, bo);
3282
	assert(kgem->gen < 040);
3381
	assert(kgem->gen < 040);
Line 3283... Line 3382...
3283
 
3382
 
3284
	if (kgem->gen < 030)
3383
	if (kgem->gen < 030)
3285
		size = 512 * 1024;
3384
		size = 512 * 1024 / PAGE_SIZE;
3286
	else
3385
	else
3287
		size = 1024 * 1024;
3386
		size = 1024 * 1024 / PAGE_SIZE;
3288
	while (size < bytes(bo))
3387
	while (size < num_pages(bo))
Line 3289... Line 3388...
3289
		size *= 2;
3388
		size <<= 1;
3290
 
3389
 
Line 3291... Line 3390...
3291
	return size;
3390
	return size;
Line 3306... Line 3405...
3306
	bool exact = flags & (CREATE_EXACT | CREATE_SCANOUT);
3405
	bool exact = flags & (CREATE_EXACT | CREATE_SCANOUT);
Line 3307... Line 3406...
3307
 
3406
 
3308
	if (tiling < 0)
3407
	if (tiling < 0)
Line 3309... Line -...
3309
		exact = true, tiling = -tiling;
-
 
3310
 
3408
		exact = true, tiling = -tiling;
3311
 
3409
 
3312
	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
3410
	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
3313
	     width, height, bpp, tiling, exact,
3411
	     width, height, bpp, tiling, exact,
3314
	     !!(flags & CREATE_INACTIVE),
3412
	     !!(flags & CREATE_INACTIVE),
Line 3322... Line 3420...
3322
				 width, height, bpp, tiling, &pitch);
3420
				 width, height, bpp, tiling, &pitch);
3323
	assert(size && size <= kgem->max_object_size);
3421
	assert(size && size <= kgem->max_object_size);
3324
	size /= PAGE_SIZE;
3422
	size /= PAGE_SIZE;
3325
	bucket = cache_bucket(size);
3423
	bucket = cache_bucket(size);
Line 3326... Line -...
3326
 
-
 
3327
	if (flags & CREATE_SCANOUT) {
-
 
3328
		struct kgem_bo *last = NULL;
-
 
3329
 
-
 
3330
		list_for_each_entry_reverse(bo, &kgem->scanout, list) {
-
 
3331
			assert(bo->scanout);
-
 
3332
			assert(bo->delta);
-
 
3333
			assert(!bo->flush);
-
 
3334
			assert_tiling(kgem, bo);
-
 
3335
 
-
 
3336
			if (size > num_pages(bo) || num_pages(bo) > 2*size)
-
 
3337
				continue;
-
 
3338
 
-
 
3339
			if (bo->tiling != tiling ||
-
 
3340
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
-
 
3341
				if (!gem_set_tiling(kgem->fd, bo->handle,
-
 
3342
						    tiling, pitch))
-
 
3343
					continue;
-
 
3344
 
-
 
3345
				bo->tiling = tiling;
-
 
3346
				bo->pitch = pitch;
-
 
3347
			}
-
 
3348
 
-
 
3349
			if (flags & CREATE_INACTIVE && bo->rq) {
-
 
3350
				last = bo;
-
 
3351
				continue;
-
 
3352
			}
-
 
3353
 
-
 
3354
			list_del(&bo->list);
-
 
3355
 
-
 
3356
			bo->unique_id = kgem_get_unique_id(kgem);
-
 
3357
			DBG(("  1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
-
 
3358
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-
 
3359
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
-
 
3360
			assert_tiling(kgem, bo);
-
 
3361
			bo->refcnt = 1;
-
 
3362
			return bo;
-
 
3363
		}
-
 
3364
 
-
 
3365
		if (last) {
-
 
3366
			list_del(&last->list);
-
 
3367
 
-
 
3368
			last->unique_id = kgem_get_unique_id(kgem);
-
 
3369
			DBG(("  1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
-
 
3370
			     last->pitch, last->tiling, last->handle, last->unique_id));
-
 
3371
			assert(last->pitch*kgem_aligned_height(kgem, height, last->tiling) <= kgem_bo_size(last));
-
 
3372
			assert_tiling(kgem, last);
-
 
3373
			last->refcnt = 1;
-
 
3374
			return last;
-
 
3375
		}
-
 
3376
 
-
 
3377
		bo = NULL; //__kgem_bo_create_as_display(kgem, size, tiling, pitch);
-
 
3378
		if (bo)
-
 
3379
			return bo;
-
 
3380
	}
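The scanout-reuse loop above only recycles a cached framebuffer when the request falls inside its size window and the buffer can be re-tiled to the requested pitch; buffers still busy on the GPU are remembered in last as a fallback. A small sketch of the acceptance test, with illustrative parameter names:

/* Reuse a cached scanout only if it is at least as large as the request
 * but no more than twice as large, so an oversized framebuffer is not
 * pinned for a small window; a failed retiling also rejects it. */
static bool scanout_reusable(int want_pages, int have_pages, bool retiled)
{
	return want_pages <= have_pages &&
	       have_pages <= 2 * want_pages &&
	       retiled;
}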
-
 
3381
 
3424
 
3382
	if (bucket >= NUM_CACHE_BUCKETS) {
3425
	if (bucket >= NUM_CACHE_BUCKETS) {
3383
		DBG(("%s: large bo num pages=%d, bucket=%d\n",
3426
		DBG(("%s: large bo num pages=%d, bucket=%d\n",
Line 3384... Line 3427...
3384
		     __FUNCTION__, size, bucket));
3427
		     __FUNCTION__, size, bucket));
Line 3426... Line 3469...
3426
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3469
			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
3427
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3470
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3428
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3471
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
3429
			assert_tiling(kgem, bo);
3472
			assert_tiling(kgem, bo);
3430
			bo->refcnt = 1;
3473
			bo->refcnt = 1;
3431
			bo->flush = true;
-
 
3432
			return bo;
3474
			return bo;
3433
		}
3475
		}
Line 3434... Line 3476...
3434
 
3476
 
3435
large_inactive:
3477
large_inactive:
Line 3486... Line 3528...
3486
		do {
3528
		do {
3487
			list_for_each_entry(bo, cache, vma) {
3529
			list_for_each_entry(bo, cache, vma) {
3488
				assert(bucket(bo) == bucket);
3530
				assert(bucket(bo) == bucket);
3489
				assert(bo->refcnt == 0);
3531
				assert(bo->refcnt == 0);
3490
				assert(!bo->scanout);
3532
				assert(!bo->scanout);
3491
				assert(bo->map);
-
 
3492
				assert(IS_CPU_MAP(bo->map) == for_cpu);
3533
				assert(for_cpu ? bo->map__cpu : bo->map__gtt);
3493
				assert(bo->rq == NULL);
3534
				assert(bo->rq == NULL);
-
 
3535
				assert(bo->exec == NULL);
3494
				assert(list_is_empty(&bo->request));
3536
				assert(list_is_empty(&bo->request));
3495
				assert(bo->flush == false);
3537
				assert(bo->flush == false);
3496
				assert_tiling(kgem, bo);
3538
				assert_tiling(kgem, bo);
Line 3497... Line 3539...
3497
 
3539
 
Line 3518... Line 3560...
3518
				bo->delta = 0;
3560
				bo->delta = 0;
3519
				bo->unique_id = kgem_get_unique_id(kgem);
3561
				bo->unique_id = kgem_get_unique_id(kgem);
3520
				bo->domain = DOMAIN_NONE;
3562
				bo->domain = DOMAIN_NONE;
Line 3521... Line 3563...
3521
 
3563
 
-
 
3564
				kgem_bo_remove_from_inactive(kgem, bo);
-
 
3565
				assert(list_is_empty(&bo->list));
Line 3522... Line 3566...
3522
				kgem_bo_remove_from_inactive(kgem, bo);
3566
				assert(list_is_empty(&bo->vma));
3523
 
3567
 
3524
				DBG(("  from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n",
3568
				DBG(("  from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n",
3525
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
3569
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
Line 3738... Line 3782...
3738
		if (bo->tiling != tiling ||
3782
		if (bo->tiling != tiling ||
3739
		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3783
		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
3740
			if (!gem_set_tiling(kgem->fd, bo->handle,
3784
			if (!gem_set_tiling(kgem->fd, bo->handle,
3741
					    tiling, pitch))
3785
					    tiling, pitch))
3742
				continue;
3786
				continue;
3743
 
-
 
3744
			if (bo->map)
-
 
3745
				kgem_bo_release_map(kgem, bo);
-
 
3746
		}
3787
		}
Line 3747... Line 3788...
3747
 
3788
 
3748
		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
3789
		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
3749
			kgem_bo_free(kgem, bo);
3790
			kgem_bo_free(kgem, bo);
3750
			break;
3791
			break;
Line 3751... Line 3792...
3751
		}
3792
		}
-
 
3793
 
-
 
3794
		kgem_bo_remove_from_inactive(kgem, bo);
Line 3752... Line 3795...
3752
 
3795
		assert(list_is_empty(&bo->list));
3753
		kgem_bo_remove_from_inactive(kgem, bo);
3796
		assert(list_is_empty(&bo->vma));
Line 3754... Line 3797...
3754
 
3797
 
Line 3797... Line 3840...
3797
	if (!bo) {
3840
	if (!bo) {
3798
		gem_close(kgem->fd, handle);
3841
		gem_close(kgem->fd, handle);
3799
		return NULL;
3842
		return NULL;
3800
	}
3843
	}
Line 3801... Line -...
3801
 
-
 
3802
	if (bucket >= NUM_CACHE_BUCKETS) {
-
 
3803
		DBG(("%s: marking large bo for automatic flushing\n",
-
 
3804
		     __FUNCTION__));
-
 
3805
		bo->flush = true;
-
 
3806
	}
-
 
3807
 
3844
 
3808
	bo->unique_id = kgem_get_unique_id(kgem);
3845
	bo->unique_id = kgem_get_unique_id(kgem);
3809
	if (tiling == I915_TILING_NONE ||
3846
	if (tiling == I915_TILING_NONE ||
3810
	    gem_set_tiling(kgem->fd, handle, tiling, pitch)) {
3847
	    gem_set_tiling(kgem->fd, handle, tiling, pitch)) {
3811
		bo->tiling = tiling;
3848
		bo->tiling = tiling;
Line 3933... Line 3970...
3933
{
3970
{
3934
	DBG(("%s: handle=%d, proxy? %d\n",
3971
	DBG(("%s: handle=%d, proxy? %d\n",
3935
	     __FUNCTION__, bo->handle, bo->proxy != NULL));
3972
	     __FUNCTION__, bo->handle, bo->proxy != NULL));
Line 3936... Line 3973...
3936
 
3973
 
-
 
3974
	if (bo->proxy) {
-
 
3975
		assert(!bo->reusable);
-
 
3976
		kgem_bo_binding_free(kgem, bo);
-
 
3977
 
3937
	if (bo->proxy) {
3978
		assert(list_is_empty(&bo->list));
3938
		_list_del(&bo->vma);
3979
		_list_del(&bo->vma);
-
 
3980
		_list_del(&bo->request);
3939
		_list_del(&bo->request);
3981
 
3940
		if (bo->io && bo->exec == NULL)
3982
		if (bo->io && bo->domain == DOMAIN_CPU)
-
 
3983
			_kgem_bo_delete_buffer(kgem, bo);
3941
			_kgem_bo_delete_buffer(kgem, bo);
3984
 
3942
		kgem_bo_unref(kgem, bo->proxy);
-
 
3943
		kgem_bo_binding_free(kgem, bo);
-
 
3944
		free(bo);
-
 
3945
		return;
-
 
Line -... Line 3985...
-
 
3985
		kgem_bo_unref(kgem, bo->proxy);
-
 
3986
 
-
 
3987
		*(struct kgem_bo **)bo = __kgem_freed_bo;
3946
		}
3988
		__kgem_freed_bo = bo;
3947
 
3989
	} else
Line 3948... Line 3990...
3948
	__kgem_bo_destroy(kgem, bo);
3990
	__kgem_bo_destroy(kgem, bo);
3949
}
3991
}
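In the new revision a destroyed proxy is no longer handed back to free(); it is pushed onto the __kgem_freed_bo free list, threading the next pointer through the first word of the struct so a later allocation can pop it again. A sketch of that pattern (helper names are illustrative; kgem is driven from a single thread, so no locking is involved):

/* Single-linked freelist: a dead struct kgem_bo stores the previous list
 * head in its first pointer-sized word. */
static void bo_push_freed(struct kgem_bo *bo)
{
	*(struct kgem_bo **)bo = __kgem_freed_bo;
	__kgem_freed_bo = bo;
}

static struct kgem_bo *bo_pop_freed(void)
{
	struct kgem_bo *bo = __kgem_freed_bo;

	if (bo != NULL)
		__kgem_freed_bo = *(struct kgem_bo **)bo;

	return bo;
}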
Line 3987... Line 4029...
3987
inline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
4029
inline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
3988
{
4030
{
3989
	return kgem->nreloc && bo->rq && RQ_RING(bo->rq) != kgem->ring;
4031
	return kgem->nreloc && bo->rq && RQ_RING(bo->rq) != kgem->ring;
3990
}
4032
}
Line -... Line 4033...
-
 
4033
 
-
 
4034
static bool aperture_check(struct kgem *kgem, unsigned num_pages)
-
 
4035
{
-
 
4036
	if (kgem->aperture) {
-
 
4037
		struct drm_i915_gem_get_aperture aperture;
-
 
4038
 
-
 
4039
		VG_CLEAR(aperture);
-
 
4040
		aperture.aper_available_size = kgem->aperture_high;
-
 
4041
		aperture.aper_available_size *= PAGE_SIZE;
-
 
4042
		(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
-
 
4043
 
-
 
4044
		DBG(("%s: aperture required %ld bytes, available %ld bytes\n",
-
 
4045
		     __FUNCTION__,
-
 
4046
		     (long)num_pages * PAGE_SIZE,
-
 
4047
		     (long)aperture.aper_available_size));
-
 
4048
 
-
 
4049
		/* Leave some space in case of alignment issues */
-
 
4050
		aperture.aper_available_size -= 1024 * 1024;
-
 
4051
		aperture.aper_available_size -= kgem->aperture_mappable * PAGE_SIZE / 2;
-
 
4052
		if (kgem->gen < 033)
-
 
4053
			aperture.aper_available_size -= kgem->aperture_max_fence * PAGE_SIZE;
-
 
4054
		if (!kgem->has_llc)
-
 
4055
			aperture.aper_available_size -= 2 * kgem->nexec * PAGE_SIZE;
-
 
4056
 
-
 
4057
		DBG(("%s: num_pages=%d, estimated max usable=%ld\n",
-
 
4058
		     __FUNCTION__, num_pages, (long)(aperture.aper_available_size/PAGE_SIZE)));
-
 
4059
 
-
 
4060
		if (num_pages <= aperture.aper_available_size / PAGE_SIZE)
-
 
4061
			return true;
-
 
4062
	}
-
 
4063
 
-
 
4064
	return false;
-
 
4065
}
-
 
4066
 
-
 
4067
static inline bool kgem_flush(struct kgem *kgem, bool flush)
-
 
4068
{
-
 
4069
	if (unlikely(kgem->wedged))
-
 
4070
		return false;
-
 
4071
 
-
 
4072
	if (kgem->nreloc == 0)
-
 
4073
		return true;
-
 
4074
 
-
 
4075
	if (container_of(kgem, struct sna, kgem)->flags & SNA_POWERSAVE)
-
 
4076
		return true;
-
 
4077
 
-
 
4078
	if (kgem->flush == flush && kgem->aperture < kgem->aperture_low)
-
 
4079
		return true;
-
 
4080
 
-
 
4081
	DBG(("%s: opportunistic flushing? flush=%d,%d, aperture=%d/%d, idle?=%d\n",
-
 
4082
	     __FUNCTION__, kgem->flush, flush, kgem->aperture, kgem->aperture_low, kgem_ring_is_idle(kgem, kgem->ring)));
-
 
4083
	return !kgem_ring_is_idle(kgem, kgem->ring);
-
 
4084
}
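aperture_check(), added in this revision, re-queries the kernel for the space actually left in the GTT before failing a batch, then subtracts margins for alignment, mappable pressure and, on older generations, fences; kgem_flush() below it decides whether an opportunistic flush is worthwhile based on ring idleness and aperture pressure. A minimal sketch of the underlying query, error handling elided and the helper name illustrative:

/* DRM_IOCTL_I915_GEM_GET_APERTURE reports the total and the currently
 * available aperture in bytes; kgem accounts in pages, so convert before
 * comparing against the num_pages totals. */
static unsigned available_gtt_pages(int fd)
{
	struct drm_i915_gem_get_aperture aperture;

	VG_CLEAR(aperture);
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture))
		return 0;

	return aperture.aper_available_size / PAGE_SIZE;
}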
3991
 
4085
 
3992
bool kgem_check_bo(struct kgem *kgem, ...)
4086
bool kgem_check_bo(struct kgem *kgem, ...)
3993
{
4087
{
3994
	va_list ap;
4088
	va_list ap;
3995
	struct kgem_bo *bo;
4089
	struct kgem_bo *bo;
3996
	int num_exec = 0;
4090
	int num_exec = 0;
3997
	int num_pages = 0;
4091
	int num_pages = 0;
-
 
4092
	bool flush = false;
Line 3998... Line 4093...
3998
	bool flush = false;
4093
	bool busy = true;
3999
 
4094
 
4000
	va_start(ap, kgem);
4095
	va_start(ap, kgem);
4001
	while ((bo = va_arg(ap, struct kgem_bo *))) {
4096
	while ((bo = va_arg(ap, struct kgem_bo *))) {
4002
		while (bo->proxy)
4097
		while (bo->proxy)
4003
			bo = bo->proxy;
4098
			bo = bo->proxy;
Line 4004... Line 4099...
4004
		if (bo->exec)
4099
		if (bo->exec)
-
 
4100
			continue;
4005
			continue;
4101
 
-
 
4102
		if (needs_semaphore(kgem, bo)) {
Line 4006... Line 4103...
4006
 
4103
			DBG(("%s: flushing for required semaphore\n", __FUNCTION__));
4007
		if (needs_semaphore(kgem, bo))
4104
			return false;
Line 4008... Line 4105...
4008
			return false;
4105
		}
-
 
4106
 
4009
 
4107
		num_pages += num_pages(bo);
4010
		num_pages += num_pages(bo);
4108
		num_exec++;
Line 4011... Line 4109...
4011
		num_exec++;
4109
 
4012
 
4110
		flush |= bo->flush;
Line 4013... Line 4111...
4013
		flush |= bo->flush;
4111
		busy &= bo->rq != NULL;
4014
	}
4112
	}
Line 4015... Line -...
4015
	va_end(ap);
-
 
4016
 
-
 
4017
	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
-
 
4018
	     __FUNCTION__, num_pages, num_exec));
4113
	va_end(ap);
4019
 
-
 
4020
	if (!num_pages)
4114
 
4021
		return true;
4115
	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
4022
 
4116
	     __FUNCTION__, num_pages, num_exec));
4023
	if (kgem_flush(kgem, flush))
4117
 
Line 4024... Line 4118...
4024
		return false;
4118
	if (!num_pages)
4025
 
4119
		return true;
4026
	if (kgem->aperture > kgem->aperture_low &&
4120
 
-
 
4121
	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) {
4027
	    kgem_ring_is_idle(kgem, kgem->ring)) {
4122
		DBG(("%s: out of exec slots (%d + %d / %d)\n", __FUNCTION__,
4028
		DBG(("%s: current aperture usage (%d) is greater than low water mark (%d)\n",
4123
		     kgem->nexec, num_exec, KGEM_EXEC_SIZE(kgem)));
Line 4029... Line -...
4029
		     __FUNCTION__, kgem->aperture, kgem->aperture_low));
-
 
4030
		return false;
-
 
4031
	}
-
 
4032
 
4124
		return false;
4033
	if (num_pages + kgem->aperture > kgem->aperture_high) {
-
 
4034
		DBG(("%s: final aperture usage (%d) is greater than high water mark (%d)\n",
-
 
4035
		     __FUNCTION__, num_pages + kgem->aperture, kgem->aperture_high));
4125
	}
-
 
4126
 
-
 
4127
	if (num_pages + kgem->aperture > kgem->aperture_high) {
4036
		return false;
4128
		DBG(("%s: final aperture usage (%d) is greater than high water mark (%d)\n",
Line -... Line 4129...
-
 
4129
		     __FUNCTION__, num_pages + kgem->aperture, kgem->aperture_high));
-
 
4130
		if (!aperture_check(kgem, num_pages + kgem->aperture))
-
 
4131
			return false;
-
 
4132
	}
-
 
4133
 
-
 
4134
	if (busy)
-
 
4135
		return true;
Line -... Line 4136...
-
 
4136
 
-
 
4137
	return kgem_flush(kgem, flush);
-
 
4138
}
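kgem_check_bo() is the admission test run before emitting commands that reference the listed buffers: it fails when a cross-ring semaphore would be needed, when the exec slot limit would be exceeded, or when the aperture high-water mark cannot be satisfied, and the caller is then expected to flush and retry. A hypothetical usage sketch (kgem_submit() is the batch flush entry point used throughout sna):

/* Reserve room for a two-buffer operation; on failure submit the pending
 * batch so the next check starts from an empty request. */
static void reserve_for_copy(struct kgem *kgem,
			     struct kgem_bo *dst, struct kgem_bo *src)
{
	if (!kgem_check_bo(kgem, dst, src, NULL))
		kgem_submit(kgem);
}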
-
 
4139
 
-
 
4140
#if 0
Line -... Line 4141...
-
 
4141
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
Line -... Line 4142...
-
 
4142
{
-
 
4143
	assert(bo->refcnt);
Line -... Line 4144...
-
 
4144
	while (bo->proxy)
-
 
4145
		bo = bo->proxy;
-
 
4146
	assert(bo->refcnt);
-
 
4147
 
-
 
4148
	if (bo->exec) {
-
 
4149
		if (kgem->gen < 040 &&
-
 
4150
		    bo->tiling != I915_TILING_NONE &&
-
 
4151
		    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
-
 
4152
			uint32_t size;
-
 
4153
 
Line -... Line 4154...
-
 
4154
			assert(bo->tiling == I915_TILING_X);
-
 
4155
 
-
 
4156
			if (kgem->nfence >= kgem->fence_max)
-
 
4157
				return false;
-
 
4158
 
-
 
4159
			if (kgem->aperture_fenced) {
-
 
4160
				size = 3*kgem->aperture_fenced;
-
 
4161
				if (kgem->aperture_total == kgem->aperture_mappable)
-
 
4162
					size += kgem->aperture;
-
 
4163
				if (size > kgem->aperture_mappable &&
-
 
4164
				    kgem_ring_is_idle(kgem, kgem->ring)) {
-
 
4165
					DBG(("%s: opportunistic fence flush\n", __FUNCTION__));
-
 
4166
					return false;
-
 
4167
				}
Line -... Line 4168...
-
 
4168
			}
-
 
4169
 
Line -... Line 4170...
-
 
4170
			size = kgem_bo_fenced_size(kgem, bo);
-
 
4171
			if (size > kgem->aperture_max_fence)
Line -... Line 4172...
-
 
4172
				kgem->aperture_max_fence = size;
-
 
4173
			size += kgem->aperture_fenced;
-
 
4174
			if (kgem->gen < 033)
-
 
4175
				size += kgem->aperture_max_fence;
Line -... Line 4176...
-
 
4176
			if (kgem->aperture_total == kgem->aperture_mappable)
-
 
4177
				size += kgem->aperture;
-
 
4178
			if (size > kgem->aperture_mappable) {
-
 
4179
				DBG(("%s: estimated fence space required [%d] exceed aperture [%d]\n",
-
 
4180
				     __FUNCTION__, size, kgem->aperture_mappable));
-
 
4181
				return false;
-
 
4182
			}
-
 
4183
		}
-
 
4184
 
-
 
4185
		return true;
-
 
4186
	}
-
 
4187
 
-
 
4188
	if (kgem->nexec >= KGEM_EXEC_SIZE(kgem) - 1)
-
 
4189
		return false;
-
 
4190
 
-
 
4191
	if (needs_semaphore(kgem, bo)) {
-
 
4192
		DBG(("%s: flushing for required semaphore\n", __FUNCTION__));
-
 
4193
		return false;
-
 
4194
	}
Line -... Line 4195...
-
 
4195
 
-
 
4196
	assert_tiling(kgem, bo);
-
 
4197
	if (kgem->gen < 040 && bo->tiling != I915_TILING_NONE) {
-
 
4198
		uint32_t size;
-
 
4199
 
-
 
4200
		assert(bo->tiling == I915_TILING_X);
-
 
4201
 
-
 
4202
		if (kgem->nfence >= kgem->fence_max)
-
 
4203
			return false;
-
 
4204
 
-
 
4205
		if (kgem->aperture_fenced) {
-
 
4206
			size = 3*kgem->aperture_fenced;
-
 
4207
			if (kgem->aperture_total == kgem->aperture_mappable)
-
 
4208
				size += kgem->aperture;
-
 
4209
			if (size > kgem->aperture_mappable &&
-
 
4210
			    kgem_ring_is_idle(kgem, kgem->ring)) {
-
 
4211
				DBG(("%s: opportunistic fence flush\n", __FUNCTION__));
-
 
4212
				return false;
-
 
4213
			}
-
 
4214
		}
-
 
4215
 
Line -... Line 4216...
-
 
4216
		size = kgem_bo_fenced_size(kgem, bo);
-
 
4217
		if (size > kgem->aperture_max_fence)
Line -... Line 4218...
-
 
4218
			kgem->aperture_max_fence = size;
-
 
4219
		size += kgem->aperture_fenced;
-
 
4220
		if (kgem->gen < 033)
Line 4083... Line 4267...
4083
	int index;
4267
	int index;
Line 4084... Line 4268...
4084
 
4268
 
4085
	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
4269
	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
Line -... Line 4270...
-
 
4270
	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));
4086
	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));
4271
 
Line 4087... Line -...
4087
 
-
 
4088
	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);
-
 
4089
 
-
 
4090
	if (bo != NULL && bo->handle == -2)
-
 
4091
	{
-
 
4092
		if (bo->exec == NULL)
-
 
4093
			kgem_add_bo(kgem, bo);
-
 
4094
 
-
 
4095
		if (read_write_domain & 0x7fff && !bo->gpu_dirty) {
-
 
4096
			__kgem_bo_mark_dirty(bo);
-
 
4097
		}
-
 
4098
		return 0;
4272
	assert(kgem->gen < 0100);
4099
	}
4273
	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);
4100
 
4274
 
4101
	index = kgem->nreloc++;
4275
	index = kgem->nreloc++;
-
 
4276
	assert(index < ARRAY_SIZE(kgem->reloc));
4102
	assert(index < ARRAY_SIZE(kgem->reloc));
4277
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
4103
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
4278
	if (bo) {
4104
	if (bo) {
4279
		assert(kgem->mode != KGEM_NONE);
4105
		assert(bo->refcnt);
4280
		assert(bo->refcnt);
4106
		while (bo->proxy) {
4281
		while (bo->proxy) {
Line 4113... Line 4288...
4113
				list_move_tail(&bo->request,
4288
				list_move_tail(&bo->request,
4114
					       &kgem->next_request->buffers);
4289
					       &kgem->next_request->buffers);
4115
				bo->rq = MAKE_REQUEST(kgem->next_request,
4290
				bo->rq = MAKE_REQUEST(kgem->next_request,
4116
						      kgem->ring);
4291
						      kgem->ring);
4117
				bo->exec = &_kgem_dummy_exec;
4292
				bo->exec = &_kgem_dummy_exec;
-
 
4293
				bo->domain = DOMAIN_GPU;
4118
		}
4294
		}
Line 4119... Line 4295...
4119
 
4295
 
4120
			if (read_write_domain & 0x7fff && !bo->gpu_dirty)
4296
			if (read_write_domain & 0x7fff && !bo->gpu_dirty)
Line 4131... Line 4307...
4131
		assert(RQ_RING(bo->rq) == kgem->ring);
4307
		assert(RQ_RING(bo->rq) == kgem->ring);
Line 4132... Line 4308...
4132
 
4308
 
4133
		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
4309
		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
4134
			if (bo->tiling &&
4310
			if (bo->tiling &&
-
 
4311
			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
4135
			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
4312
				assert(bo->tiling == I915_TILING_X);
4136
				assert(kgem->nfence < kgem->fence_max);
4313
				assert(kgem->nfence < kgem->fence_max);
4137
				kgem->aperture_fenced +=
4314
				kgem->aperture_fenced +=
4138
					kgem_bo_fenced_size(kgem, bo);
4315
					kgem_bo_fenced_size(kgem, bo);
4139
				kgem->nfence++;
4316
				kgem->nfence++;
Line 4162... Line 4339...
4162
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;
4339
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;
Line 4163... Line 4340...
4163
 
4340
 
4164
	return delta;
4341
	return delta;
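kgem_add_reloc() records one relocation entry (offset of the dword in the batch, target handle, read domains in the upper 16 bits of the domain word, write domain in the lower bits) and returns the target's presumed offset plus delta. A hypothetical emission sketch showing how a caller writes that value into the batch:

/* The relocation is registered for the dword at kgem->nbatch and the
 * presumed GPU address is written there, so the kernel only patches the
 * batch if the buffer has actually moved. */
static void emit_target_pointer(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem->batch[kgem->nbatch] =
		kgem_add_reloc(kgem, kgem->nbatch, bo,
			       I915_GEM_DOMAIN_RENDER << 16 |
			       I915_GEM_DOMAIN_RENDER,
			       0);
	kgem->nbatch++;
}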
Line -... Line 4342...
-
 
4342
}
-
 
4343
 
-
 
4344
uint64_t kgem_add_reloc64(struct kgem *kgem,
-
 
4345
			  uint32_t pos,
-
 
4346
			  struct kgem_bo *bo,
-
 
4347
			  uint32_t read_write_domain,
-
 
4348
			  uint64_t delta)
-
 
4349
{
-
 
4350
	int index;
-
 
4351
 
-
 
4352
	DBG(("%s: handle=%d, pos=%d, delta=%ld, domains=%08x\n",
-
 
4353
	     __FUNCTION__, bo ? bo->handle : 0, pos, (long)delta, read_write_domain));
-
 
4354
 
-
 
4355
	assert(kgem->gen >= 0100);
-
 
4356
	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);
-
 
4357
 
-
 
4358
	index = kgem->nreloc++;
-
 
4359
	assert(index < ARRAY_SIZE(kgem->reloc));
-
 
4360
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
-
 
4361
	if (bo) {
-
 
4362
		assert(kgem->mode != KGEM_NONE);
-
 
4363
		assert(bo->refcnt);
-
 
4364
		while (bo->proxy) {
-
 
4365
			DBG(("%s: adding proxy [delta=%ld] for handle=%d\n",
-
 
4366
			     __FUNCTION__, (long)bo->delta, bo->handle));
-
 
4367
			delta += bo->delta;
-
 
4368
			assert(bo->handle == bo->proxy->handle);
-
 
4369
			/* need to release the cache upon batch submit */
-
 
4370
			if (bo->exec == NULL) {
-
 
4371
				list_move_tail(&bo->request,
-
 
4372
					       &kgem->next_request->buffers);
-
 
4373
				bo->rq = MAKE_REQUEST(kgem->next_request,
-
 
4374
						      kgem->ring);
-
 
4375
				bo->exec = &_kgem_dummy_exec;
-
 
4376
				bo->domain = DOMAIN_GPU;
-
 
4377
			}
-
 
4378
 
-
 
4379
			if (read_write_domain & 0x7fff && !bo->gpu_dirty)
-
 
4380
				__kgem_bo_mark_dirty(bo);
-
 
4381
 
-
 
4382
			bo = bo->proxy;
-
 
4383
			assert(bo->refcnt);
-
 
4384
		}
-
 
4385
		assert(bo->refcnt);
-
 
4386
 
-
 
4387
		if (bo->exec == NULL)
-
 
4388
			kgem_add_bo(kgem, bo);
-
 
4389
		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
-
 
4390
		assert(RQ_RING(bo->rq) == kgem->ring);
-
 
4391
 
-
 
4392
		kgem->reloc[index].delta = delta;
-
 
4393
		kgem->reloc[index].target_handle = bo->target_handle;
-
 
4394
		kgem->reloc[index].presumed_offset = bo->presumed_offset;
-
 
4395
 
-
 
4396
		if (read_write_domain & 0x7fff && !bo->gpu_dirty) {
-
 
4397
			assert(!bo->snoop || kgem->can_blt_cpu);
-
 
4398
			__kgem_bo_mark_dirty(bo);
-
 
4399
		}
-
 
4400
 
-
 
4401
		delta += bo->presumed_offset;
-
 
4402
	} else {
-
 
4403
		kgem->reloc[index].delta = delta;
-
 
4404
		kgem->reloc[index].target_handle = ~0U;
-
 
4405
		kgem->reloc[index].presumed_offset = 0;
-
 
4406
		if (kgem->nreloc__self < 256)
-
 
4407
			kgem->reloc__self[kgem->nreloc__self++] = index;
-
 
4408
	}
-
 
4409
	kgem->reloc[index].read_domains = read_write_domain >> 16;
-
 
4410
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;
-
 
4411
 
-
 
4412
	return delta;
4165
}
4413
}
4166
 
4414
 
4167
static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
4415
static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
Line 4168... Line 4416...
4168
{
4416
{
Line 4184... Line 4432...
4184
	 * vma to within a conservative value.
4432
	 * vma to within a conservative value.
4185
	 */
4433
	 */
4186
	i = 0;
4434
	i = 0;
4187
	while (kgem->vma[type].count > 0) {
4435
	while (kgem->vma[type].count > 0) {
4188
		struct kgem_bo *bo = NULL;
4436
		struct kgem_bo *bo = NULL;
-
 
4437
		void **ptr;
Line 4189... Line 4438...
4189
 
4438
 
4190
		for (j = 0;
4439
		for (j = 0;
4191
		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
4440
		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
4192
		     j++) {
4441
		     j++) {
Line 4196... Line 4445...
4196
	}
4445
	}
4197
		if (bo == NULL)
4446
		if (bo == NULL)
4198
			break;
4447
			break;
Line 4199... Line 4448...
4199
 
4448
 
4200
		DBG(("%s: discarding inactive %s vma cache for %d\n",
-
 
4201
		     __FUNCTION__,
4449
		DBG(("%s: discarding inactive %s vma cache for %d\n",
-
 
4450
		     __FUNCTION__, type ? "CPU" : "GTT", bo->handle));
4202
		     IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle));
4451
 
4203
		assert(IS_CPU_MAP(bo->map) == type);
-
 
4204
		assert(bo->map);
4452
		ptr = type ? &bo->map__cpu : &bo->map__gtt;
Line 4205... Line 4453...
4205
			assert(bo->rq == NULL);
4453
			assert(bo->rq == NULL);
4206
 
4454
 
4207
		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
4455
		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(*ptr), bytes(bo)));
4208
//		munmap(MAP(bo->map), bytes(bo));
4456
//		munmap(MAP(*ptr), bytes(bo));
4209
		bo->map = NULL;
4457
		*ptr = NULL;
Line 4210... Line 4458...
4210
		list_del(&bo->vma);
4458
		list_del(&bo->vma);
4211
		kgem->vma[type].count--;
4459
		kgem->vma[type].count--;
Line 4220... Line 4468...
4220
 
4468
 
4221
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo)
4469
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo)
4222
{
4470
{
Line 4223... Line 4471...
4223
	void *ptr;
4471
	void *ptr;
4224
 
4472
 
Line 4225... Line 4473...
4225
	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
4473
	DBG(("%s: handle=%d, offset=%ld, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
4226
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
4474
	     bo->handle, (long)bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));
4227
 
-
 
4228
	assert(bo->proxy == NULL);
4475
 
Line 4229... Line 4476...
4229
	assert(list_is_empty(&bo->list));
4476
	assert(bo->proxy == NULL);
4230
	assert(!IS_USER_MAP(bo->map));
4477
	assert(list_is_empty(&bo->list));
4231
	assert_tiling(kgem, bo);
4478
	assert_tiling(kgem, bo);
4232
 
4479
 
4233
	if (bo->tiling == I915_TILING_NONE && !bo->scanout && kgem->has_llc) {
4480
	if (bo->tiling == I915_TILING_NONE && !bo->scanout && kgem->has_llc) {
Line 4234... Line -...
4234
		DBG(("%s: converting request for GTT map into CPU map\n",
-
 
4235
		     __FUNCTION__));
-
 
4236
		return kgem_bo_map__cpu(kgem, bo);
-
 
4237
	}
4481
		DBG(("%s: converting request for GTT map into CPU map\n",
4238
 
4482
		     __FUNCTION__));
4239
	if (IS_CPU_MAP(bo->map))
4483
		return kgem_bo_map__cpu(kgem, bo);
Line 4240... Line 4484...
4240
		kgem_bo_release_map(kgem, bo);
4484
	}
Line 4241... Line 4485...
4241
 
4485
 
4242
	ptr = bo->map;
4486
	ptr = MAP(bo->map__gtt);
Line 4252... Line 4496...
4252
		/* Cache this mapping to avoid the overhead of an
4496
		/* Cache this mapping to avoid the overhead of an
4253
		 * excruciatingly slow GTT pagefault. This is more an
4497
		 * excruciatingly slow GTT pagefault. This is more an
4254
		 * issue with compositing managers which need to frequently
4498
		 * issue with compositing managers which need to frequently
4255
		 * flush CPU damage to their GPU bo.
4499
		 * flush CPU damage to their GPU bo.
4256
		 */
4500
		 */
4257
		bo->map = ptr;
4501
		bo->map__gtt = ptr;
4258
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
4502
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
4259
	}
4503
	}
Line 4260... Line 4504...
4260
 
4504
 
4261
	return ptr;
4505
	return ptr;
Line 4262... Line 4506...
4262
}
4506
}
4263
 
4507
 
4264
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
4508
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
Line 4265... Line 4509...
4265
{
4509
{
4266
	void *ptr;
4510
	void *ptr;
Line 4267... Line 4511...
4267
 
4511
 
4268
	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
4512
	DBG(("%s: handle=%d, offset=%ld, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
4269
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
-
 
4270
 
4513
	     bo->handle, (long)bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));
4271
	assert(bo->proxy == NULL);
4514
 
Line 4272... Line 4515...
4272
	assert(list_is_empty(&bo->list));
4515
	assert(bo->proxy == NULL);
4273
	assert(!IS_USER_MAP(bo->map));
4516
	assert(list_is_empty(&bo->list));
Line 4282... Line 4525...
4282
		if (ptr)
4525
		if (ptr)
4283
			kgem_bo_sync__cpu(kgem, bo);
4526
			kgem_bo_sync__cpu(kgem, bo);
4284
		return ptr;
4527
		return ptr;
4285
	}
4528
	}
Line 4286... Line -...
4286
 
-
 
4287
	if (IS_CPU_MAP(bo->map))
-
 
4288
		kgem_bo_release_map(kgem, bo);
-
 
4289
 
4529
 
4290
	ptr = bo->map;
4530
	ptr = MAP(bo->map__gtt);
4291
	if (ptr == NULL) {
4531
	if (ptr == NULL) {
4292
		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
4532
		assert(num_pages(bo) <= kgem->aperture_mappable / 2);
Line 4293... Line 4533...
4293
		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);
4533
		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);
Line 4294... Line 4534...
4294
 
4534
 
Line 4301... Line 4541...
4301
		/* Cache this mapping to avoid the overhead of an
4541
		/* Cache this mapping to avoid the overhead of an
4302
		 * excruciatingly slow GTT pagefault. This is more an
4542
		 * excruciatingly slow GTT pagefault. This is more an
4303
		 * issue with compositing managers which need to frequently
4543
		 * issue with compositing managers which need to frequently
4304
		 * flush CPU damage to their GPU bo.
4544
		 * flush CPU damage to their GPU bo.
4305
		 */
4545
		 */
4306
		bo->map = ptr;
4546
		bo->map__gtt = ptr;
4307
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
4547
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
4308
		}
4548
		}
Line 4309... Line 4549...
4309
 
4549
 
4310
	if (bo->domain != DOMAIN_GTT || FORCE_MMAP_SYNC & (1 << DOMAIN_GTT)) {
4550
	if (bo->domain != DOMAIN_GTT || FORCE_MMAP_SYNC & (1 << DOMAIN_GTT)) {
Line 4331... Line 4571...
4331
 
4571
 
4332
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
4572
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
4333
{
4573
{
Line 4334... Line 4574...
4334
	void *ptr;
4574
	void *ptr;
4335
 
4575
 
Line 4336... Line 4576...
4336
	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
4576
	DBG(("%s: handle=%d, offset=%ld, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__,
4337
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
4577
	     bo->handle, (long)bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain));
4338
 
-
 
4339
	assert(bo->exec == NULL);
4578
 
Line 4340... Line -...
4340
	assert(list_is_empty(&bo->list));
-
 
4341
	assert(!IS_USER_MAP(bo->map));
-
 
4342
	assert_tiling(kgem, bo);
-
 
4343
 
4579
	assert(bo->exec == NULL);
4344
	if (IS_CPU_MAP(bo->map))
4580
	assert(list_is_empty(&bo->list));
4345
		kgem_bo_release_map(kgem, bo);
4581
	assert_tiling(kgem, bo);
Line 4346... Line 4582...
4346
 
4582
 
Line 4347... Line 4583...
4347
	ptr = bo->map;
4583
	ptr = MAP(bo->map__gtt);
4348
	if (ptr == NULL) {
4584
	if (ptr == NULL) {
Line 4357... Line 4593...
4357
		/* Cache this mapping to avoid the overhead of an
4593
		/* Cache this mapping to avoid the overhead of an
4358
		 * excruciatingly slow GTT pagefault. This is more an
4594
		 * excruciatingly slow GTT pagefault. This is more an
4359
		 * issue with compositing managers which need to frequently
4595
		 * issue with compositing managers which need to frequently
4360
		 * flush CPU damage to their GPU bo.
4596
		 * flush CPU damage to their GPU bo.
4361
		 */
4597
		 */
4362
		bo->map = ptr;
4598
		bo->map__gtt = ptr;
4363
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
4599
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
4364
	}
4600
	}
Line 4365... Line 4601...
4365
 
4601
 
4366
	return ptr;
4602
	return ptr;
Line 4367... Line 4603...
4367
}
4603
}
4368
 
4604
 
4369
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
-
 
4370
{
-
 
4371
	if (bo->map)
-
 
4372
		return MAP(bo->map);
-
 
4373
 
4605
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
4374
	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
4606
{
Line 4375... Line 4607...
4375
	return bo->map = __kgem_bo_map__gtt(kgem, bo);
4607
	return kgem_bo_map__async(kgem, bo);
4376
}
4608
}
4377
 
4609
 
Line 4378... Line 4610...
4378
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
4610
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
4379
{
4611
{
4380
	struct drm_i915_gem_mmap mmap_arg;
4612
	struct drm_i915_gem_mmap mmap_arg;
4381
 
4613
 
4382
	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
4614
	DBG(("%s(handle=%d, size=%d, map=%p:%p)\n",
Line 4383... Line 4615...
4383
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
4615
	     __FUNCTION__, bo->handle, bytes(bo), bo->map__gtt, bo->map__cpu));
4384
	assert(!bo->purged);
4616
	assert(!bo->purged);
4385
	assert(list_is_empty(&bo->list));
-
 
4386
	assert(bo->proxy == NULL);
-
 
4387
 
-
 
Line 4388... Line 4617...
4388
	if (IS_CPU_MAP(bo->map))
4617
	assert(list_is_empty(&bo->list));
Line 4389... Line 4618...
4389
		return MAP(bo->map);
4618
	assert(bo->proxy == NULL);
4390
 
4619
 
4391
	if (bo->map)
4620
	if (bo->map__cpu)
4392
		kgem_bo_release_map(kgem, bo);
4621
		return MAP(bo->map__cpu);
4393
 
4622
 
4394
	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));
4623
	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));
-
 
4624
 
-
 
4625
retry:
Line 4395... Line 4626...
4395
 
4626
	VG_CLEAR(mmap_arg);
4396
retry:
4627
	mmap_arg.handle = bo->handle;
Line 4397... Line -...
4397
	VG_CLEAR(mmap_arg);
-
 
4398
	mmap_arg.handle = bo->handle;
4628
	mmap_arg.offset = 0;
4399
	mmap_arg.offset = 0;
4629
	mmap_arg.size = bytes(bo);
4400
	mmap_arg.size = bytes(bo);
-
 
Line 4401... Line 4630...
4401
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
4630
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
4402
 
4631
		int err = 0;
4403
		if (__kgem_throttle_retire(kgem, 0))
4632
 
4404
			goto retry;
4633
 
Line 4405... Line 4634...
4405
 
4634
		if (__kgem_throttle_retire(kgem, 0))
Line 4406... Line 4635...
4406
		if (kgem->need_expire) {
4635
			goto retry;
4407
			kgem_cleanup_cache(kgem);
-
 
4408
			goto retry;
4636
 
4409
		}
4637
		if (kgem_cleanup_cache(kgem))
Line -... Line 4638...
-
 
4638
			goto retry;
-
 
4639
 
4410
 
4640
		ErrorF("%s: failed to mmap handle=%d, %d bytes, into CPU domain: %d\n",
-
 
4641
		       __FUNCTION__, bo->handle, bytes(bo), err);
-
 
4642
		return NULL;
4411
		ErrorF("%s: failed to mmap handle=%d, %d bytes, into CPU domain\n",
4643
	}
4412
		       __FUNCTION__, bo->handle, bytes(bo));
4644
 
-
 
4645
	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));
-
 
4646
 
Line 4413... Line -...
4413
		return NULL;
-
 
4414
	}
-
 
4415
 
-
 
4416
	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));
-
 
4417
 
-
 
4418
	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
4647
	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
Line 4419... Line 4648...
4419
	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
4648
	return bo->map__cpu = (void *)(uintptr_t)mmap_arg.addr_ptr;
4420
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
4649
}
Line 4421... Line -...
4421
}
-
 
4422
 
-
 
4423
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
-
 
4424
{
-
 
4425
	struct drm_i915_gem_mmap mmap_arg;
4650
 
4426
 
4651
 
4427
	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
-
 
Line -... Line 4652...
-
 
4652
/*
-
 
4653
struct kgem_bo *kgem_create_map(struct kgem *kgem,
4428
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
4654
				void *ptr, uint32_t size,
Line 4429... Line 4655...
4429
        assert(bo->refcnt);
4655
				bool read_only)
-
 
4656
{
4430
	assert(!bo->purged);
4657
	struct kgem_bo *bo;
-
 
4658
	uintptr_t first_page, last_page;
-
 
4659
	uint32_t handle;
Line -... Line 4660...
-
 
4660
 
4431
	assert(list_is_empty(&bo->list));
4661
	assert(MAP(ptr) == ptr);
4432
	assert(bo->proxy == NULL);
4662
 
4433
 
4663
	if (!kgem->has_userptr)
4434
	if (IS_CPU_MAP(bo->map))
4664
		return NULL;
Line -... Line 4665...
-
 
4665
 
-
 
4666
	first_page = (uintptr_t)ptr;
-
 
4667
	last_page = first_page + size + PAGE_SIZE - 1;
-
 
4668
 
-
 
4669
	first_page &= ~(PAGE_SIZE-1);
-
 
4670
	last_page &= ~(PAGE_SIZE-1);
4435
		return MAP(bo->map);
4671
	assert(last_page > first_page);
4436
 
4672
 
-
 
4673
	handle = gem_userptr(kgem->fd,
-
 
4674
			     (void *)first_page, last_page-first_page,
4437
retry:
4675
			     read_only);
4438
	VG_CLEAR(mmap_arg);
-
 
Line 4439... Line -...
4439
	mmap_arg.handle = bo->handle;
-
 
4440
	mmap_arg.offset = 0;
-
 
4441
	mmap_arg.size = bytes(bo);
-
 
4442
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
4676
	if (handle == 0)
4443
		int err = errno;
4677
		return NULL;
4444
 
-
 
4445
		assert(err != EINVAL);
-
 
-
 
4678
 
4446
 
4679
	bo = __kgem_bo_alloc(handle, (last_page - first_page) / PAGE_SIZE);
4447
		if (__kgem_throttle_retire(kgem, 0))
4680
	if (bo == NULL) {
4448
			goto retry;
4681
		gem_close(kgem->fd, handle);
-
 
4682
		return NULL;
-
 
4683
	}
4449
 
4684
 
-
 
4685
	bo->snoop = !kgem->has_llc;
-
 
4686
	debug_alloc__bo(kgem, bo);
4450
		if (kgem->need_expire) {
4687
 
4451
			kgem_cleanup_cache(kgem);
4688
	if (first_page != (uintptr_t)ptr) {
4452
			goto retry;
4689
		struct kgem_bo *proxy;
4453
		}
4690
 
4454
 
4691
		proxy = kgem_create_proxy(kgem, bo,
Line 4498... Line 4735...
4498
			bo->domain = DOMAIN_CPU;
4735
			bo->domain = DOMAIN_CPU;
4499
		}
4736
		}
4500
	}
4737
	}
4501
}
4738
}
Line -... Line 4739...
-
 
4739
 
-
 
4740
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write)
-
 
4741
{
-
 
4742
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
-
 
4743
	assert(!bo->scanout || !write);
-
 
4744
 
-
 
4745
	if (write || bo->needs_flush)
-
 
4746
		kgem_bo_submit(kgem, bo);
-
 
4747
 
-
 
4748
	/* SHM pixmaps use proxies for subpage offsets */
-
 
4749
	assert(!bo->purged);
-
 
4750
	assert(bo->refcnt);
-
 
4751
	while (bo->proxy)
-
 
4752
		bo = bo->proxy;
-
 
4753
	assert(bo->refcnt);
-
 
4754
	assert(!bo->purged);
-
 
4755
 
-
 
4756
	if (bo->domain != DOMAIN_CPU || FORCE_MMAP_SYNC & (1 << DOMAIN_CPU)) {
-
 
4757
		struct drm_i915_gem_set_domain set_domain;
-
 
4758
 
-
 
4759
		DBG(("%s: SYNC: handle=%d, needs_flush? %d, domain? %d, busy? %d\n",
-
 
4760
		     __FUNCTION__, bo->handle,
-
 
4761
		     bo->needs_flush, bo->domain,
-
 
4762
		     __kgem_busy(kgem, bo->handle)));
-
 
4763
 
-
 
4764
		VG_CLEAR(set_domain);
-
 
4765
		set_domain.handle = bo->handle;
-
 
4766
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
-
 
4767
		set_domain.write_domain = write ? I915_GEM_DOMAIN_CPU : 0;
-
 
4768
 
-
 
4769
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
-
 
4770
			if (bo->exec == NULL)
-
 
4771
				kgem_bo_retire(kgem, bo);
-
 
4772
			bo->domain = write ? DOMAIN_CPU : DOMAIN_NONE;
-
 
4773
		}
-
 
4774
	}
-
 
4775
}
-
 
4776
 
-
 
4777
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
-
 
4778
{
-
 
4779
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
-
 
4780
	assert(bo->refcnt);
-
 
4781
	assert(bo->proxy == NULL);
-
 
4782
 
-
 
4783
	kgem_bo_submit(kgem, bo);
-
 
4784
 
-
 
4785
	if (bo->domain != DOMAIN_GTT || FORCE_MMAP_SYNC & (1 << DOMAIN_GTT)) {
-
 
4786
		struct drm_i915_gem_set_domain set_domain;
-
 
4787
 
-
 
4788
		DBG(("%s: SYNC: handle=%d, needs_flush? %d, domain? %d, busy? %d\n",
-
 
4789
		     __FUNCTION__, bo->handle,
-
 
4790
		     bo->needs_flush, bo->domain,
-
 
4791
		     __kgem_busy(kgem, bo->handle)));
-
 
4792
 
-
 
4793
		VG_CLEAR(set_domain);
-
 
4794
		set_domain.handle = bo->handle;
-
 
4795
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-
 
4796
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
 
4797
 
-
 
4798
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
-
 
4799
			kgem_bo_retire(kgem, bo);
-
 
4800
			bo->domain = DOMAIN_GTT;
-
 
4801
			bo->gtt_dirty = true;
-
 
4802
		}
-
 
4803
	}
-
 
4804
}
4502
 
4805
 
4503
void kgem_clear_dirty(struct kgem *kgem)
4806
void kgem_clear_dirty(struct kgem *kgem)
4504
{
4807
{
4505
	struct list * const buffers = &kgem->next_request->buffers;
4808
	struct list * const buffers = &kgem->next_request->buffers;
Line 4540... Line 4843...
4540
 
4843
 
4541
	assert(!bo->scanout);
4844
	assert(!bo->scanout);
4542
	bo->proxy = kgem_bo_reference(target);
4845
	bo->proxy = kgem_bo_reference(target);
Line 4543... Line 4846...
4543
	bo->delta = offset;
4846
	bo->delta = offset;
4544
 
4847
 
4545
	if (target->exec) {
4848
	if (target->exec && !bo->io) {
4546
		list_move_tail(&bo->request, &kgem->next_request->buffers);
4849
		list_move_tail(&bo->request, &kgem->next_request->buffers);
4547
		bo->exec = &_kgem_dummy_exec;
4850
		bo->exec = &_kgem_dummy_exec;
Line 4561... Line 4864...
4561
	if (bo == NULL)
4864
	if (bo == NULL)
4562
		return NULL;
4865
		return NULL;
Line 4563... Line 4866...
4563
 
4866
 
4564
	bo->mem = NULL;
4867
	bo->mem = NULL;
4565
	bo->need_io = false;
4868
	bo->need_io = false;
Line 4566... Line 4869...
4566
	bo->mmapped = true;
4869
	bo->mmapped = MMAPPED_CPU;
4567
 
4870
 
Line 4568... Line 4871...
4568
	return bo;
4871
	return bo;
Line 4636... Line 4939...
4636
		     __FUNCTION__, bo->base.handle, num_pages(&bo->base)));
4939
		     __FUNCTION__, bo->base.handle, num_pages(&bo->base)));
Line 4637... Line 4940...
4637
 
4940
 
4638
		assert(bo->base.snoop);
4941
		assert(bo->base.snoop);
4639
		assert(bo->base.tiling == I915_TILING_NONE);
4942
		assert(bo->base.tiling == I915_TILING_NONE);
4640
		assert(num_pages(&bo->base) >= alloc);
4943
		assert(num_pages(&bo->base) >= alloc);
4641
		assert(bo->mmapped == true);
4944
		assert(bo->mmapped == MMAPPED_CPU);
Line 4642... Line 4945...
4642
		assert(bo->need_io == false);
4945
		assert(bo->need_io == false);
4643
 
4946
 
4644
		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
4947
		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
Line 4683... Line 4986...
4683
			DBG(("%s: created CPU (LLC) handle=%d for buffer, size %d\n",
4986
			DBG(("%s: created CPU (LLC) handle=%d for buffer, size %d\n",
4684
			     __FUNCTION__, bo->base.handle, alloc));
4987
			     __FUNCTION__, bo->base.handle, alloc));
4685
		}
4988
		}
Line 4686... Line 4989...
4686
 
4989
 
4687
		assert(bo->base.refcnt == 1);
4990
		assert(bo->base.refcnt == 1);
4688
		assert(bo->mmapped == true);
4991
		assert(bo->mmapped == MMAPPED_CPU);
Line 4689... Line 4992...
4689
		assert(bo->need_io == false);
4992
		assert(bo->need_io == false);
4690
 
4993
 
4691
		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
4994
		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
Line 4719... Line 5022...
4719
			DBG(("%s: created CPU handle=%d for buffer, size %d\n",
5022
			DBG(("%s: created CPU handle=%d for buffer, size %d\n",
4720
			     __FUNCTION__, bo->base.handle, alloc));
5023
			     __FUNCTION__, bo->base.handle, alloc));
4721
		}
5024
		}
Line 4722... Line 5025...
4722
 
5025
 
4723
		assert(bo->base.refcnt == 1);
5026
		assert(bo->base.refcnt == 1);
4724
		assert(bo->mmapped == true);
5027
		assert(bo->mmapped == MMAPPED_CPU);
Line 4725... Line 5028...
4725
		assert(bo->need_io == false);
5028
		assert(bo->need_io == false);
4726
 
5029
 
Line 4761... Line 5064...
4761
		debug_alloc(kgem, alloc);
5064
		debug_alloc(kgem, alloc);
4762
		__kgem_bo_init(&bo->base, handle, alloc);
5065
		__kgem_bo_init(&bo->base, handle, alloc);
4763
		DBG(("%s: created snoop handle=%d for buffer\n",
5066
		DBG(("%s: created snoop handle=%d for buffer\n",
4764
		     __FUNCTION__, bo->base.handle));
5067
		     __FUNCTION__, bo->base.handle));
Line 4765... Line 5068...
4765
 
5068
 
4766
		assert(bo->mmapped == true);
5069
		assert(bo->mmapped == MMAPPED_CPU);
Line 4767... Line 5070...
4767
		assert(bo->need_io == false);
5070
		assert(bo->need_io == false);
4768
 
5071
 
4769
		bo->base.refcnt = 1;
5072
		bo->base.refcnt = 1;
Line 4770... Line 5073...
4770
		bo->base.snoop = true;
5073
		bo->base.snoop = true;
4771
		bo->base.map = MAKE_USER_MAP(bo->mem);
5074
		bo->base.map__cpu = MAKE_USER_MAP(bo->mem);
Line 4772... Line 5075...
4772
 
5075
 
Line 4799... Line 5102...
4799
		assert(bo->base.refcnt >= 1);
5102
		assert(bo->base.refcnt >= 1);
Line 4800... Line 5103...
4800
 
5103
 
4801
		/* We can reuse any write buffer which we can fit */
5104
		/* We can reuse any write buffer which we can fit */
4802
		if (flags == KGEM_BUFFER_LAST &&
5105
		if (flags == KGEM_BUFFER_LAST &&
4803
		    bo->write == KGEM_BUFFER_WRITE &&
5106
		    bo->write == KGEM_BUFFER_WRITE &&
-
 
5107
		    bo->base.refcnt == 1 &&
4804
		    bo->base.refcnt == 1 && !bo->mmapped &&
5108
		    bo->mmapped == MMAPPED_NONE &&
4805
		    size <= bytes(&bo->base)) {
5109
		    size <= bytes(&bo->base)) {
4806
			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
5110
			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
4807
			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
5111
			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
4808
			gem_write(kgem->fd, bo->base.handle,
5112
			gem_write__cachealigned(kgem->fd, bo->base.handle,
4809
				  0, bo->used, bo->mem);
5113
				  0, bo->used, bo->mem);
4810
			kgem_buffer_release(kgem, bo);
5114
			kgem_buffer_release(kgem, bo);
4811
			bo->need_io = 0;
5115
			bo->need_io = 0;
4812
			bo->write = 0;
5116
			bo->write = 0;
Line 4843... Line 5147...
4843
 
5147
 
4844
	if (flags & KGEM_BUFFER_WRITE) {
5148
	if (flags & KGEM_BUFFER_WRITE) {
4845
		list_for_each_entry(bo, &kgem->active_buffers, base.list) {
5149
		list_for_each_entry(bo, &kgem->active_buffers, base.list) {
4846
			assert(bo->base.io);
5150
			assert(bo->base.io);
-
 
5151
			assert(bo->base.refcnt >= 1);
4847
			assert(bo->base.refcnt >= 1);
5152
			assert(bo->base.exec == NULL);
4848
			assert(bo->mmapped);
5153
			assert(bo->mmapped);
Line 4849... Line 5154...
4849
			assert(!IS_CPU_MAP(bo->base.map) || kgem->has_llc || bo->base.snoop);
5154
			assert(bo->mmapped == MMAPPED_GTT || kgem->has_llc || bo->base.snoop);
4850
 
5155
 
4851
			if (!kgem->has_llc && (bo->write & ~flags) & KGEM_BUFFER_INPLACE) {
5156
			if ((bo->write & ~flags) & KGEM_BUFFER_INPLACE && !bo->base.snoop) {
4852
				DBG(("%s: skip write %x buffer, need %x\n",
5157
				DBG(("%s: skip write %x buffer, need %x\n",
4853
				     __FUNCTION__, bo->write, flags));
5158
				     __FUNCTION__, bo->write, flags));
Line 4860... Line 5165...
4860
				offset = bo->used;
5165
				offset = bo->used;
4861
				bo->used += size;
5166
				bo->used += size;
4862
				list_move(&bo->base.list, &kgem->batch_buffers);
5167
				list_move(&bo->base.list, &kgem->batch_buffers);
4863
				goto done;
5168
				goto done;
4864
			}
5169
			}
-
 
5170
 
-
 
5171
			if (size <= bytes(&bo->base) &&
-
 
5172
			    (bo->base.rq == NULL ||
-
 
5173
			     !__kgem_busy(kgem, bo->base.handle))) {
-
 
5174
				DBG(("%s: reusing whole buffer? size=%d, total=%d\n",
-
 
5175
				     __FUNCTION__, size, bytes(&bo->base)));
-
 
5176
				__kgem_bo_clear_busy(&bo->base);
-
 
5177
				kgem_buffer_release(kgem, bo);
-
 
5178
 
-
 
5179
				switch (bo->mmapped) {
-
 
5180
				case MMAPPED_CPU:
-
 
5181
					kgem_bo_sync__cpu(kgem, &bo->base);
-
 
5182
					break;
-
 
5183
				case MMAPPED_GTT:
-
 
5184
					kgem_bo_sync__gtt(kgem, &bo->base);
-
 
5185
					break;
-
 
5186
				}
-
 
5187
 
-
 
5188
				offset = 0;
-
 
5189
				bo->used = size;
-
 
5190
				list_move(&bo->base.list, &kgem->batch_buffers);
-
 
5191
				goto done;
-
 
5192
			}
4865
		}
5193
		}
4866
	}
5194
	}
4867
#endif
5195
#endif
Line 4868... Line 5196...
4868
 
5196
 
Line 4873... Line 5201...
4873
		alloc = ALIGN(size, kgem->buffer_size);
5201
		alloc = ALIGN(size, kgem->buffer_size);
4874
	if (alloc > MAX_CACHE_SIZE)
5202
	if (alloc > MAX_CACHE_SIZE)
4875
		alloc = PAGE_ALIGN(size);
5203
		alloc = PAGE_ALIGN(size);
4876
	assert(alloc);
5204
	assert(alloc);
Line -... Line 5205...
-
 
5205
 
4877
 
5206
	alloc /= PAGE_SIZE;
4878
	if (alloc > kgem->aperture_mappable / 4)
5207
	if (alloc > kgem->aperture_mappable / 4)
4879
		flags &= ~KGEM_BUFFER_INPLACE;
-
 
Line 4880... Line 5208...
4880
	alloc /= PAGE_SIZE;
5208
		flags &= ~KGEM_BUFFER_INPLACE;
4881
 
5209
 
4882
	if (kgem->has_llc &&
5210
	if (kgem->has_llc &&
4883
	    (flags & KGEM_BUFFER_WRITE_INPLACE) != KGEM_BUFFER_WRITE_INPLACE) {
5211
	    (flags & KGEM_BUFFER_WRITE_INPLACE) != KGEM_BUFFER_WRITE_INPLACE) {
Line 4961... Line 5289...
4961
		if (old == NULL)
5289
		if (old == NULL)
4962
			old = search_linear_cache(kgem, NUM_PAGES(size),
5290
			old = search_linear_cache(kgem, NUM_PAGES(size),
4963
						  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
5291
						  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
4964
		if (old == NULL) {
5292
		if (old == NULL) {
4965
			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
5293
			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
4966
			if (old && !__kgem_bo_is_mappable(kgem, old)) {
5294
			if (old && !kgem_bo_can_map(kgem, old)) {
4967
				_kgem_bo_destroy(kgem, old);
5295
				_kgem_bo_destroy(kgem, old);
4968
				old = NULL;
5296
				old = NULL;
4969
			}
5297
			}
4970
		}
5298
		}
4971
		if (old) {
5299
		if (old) {
4972
			DBG(("%s: reusing handle=%d for buffer\n",
5300
			DBG(("%s: reusing handle=%d for buffer\n",
4973
			     __FUNCTION__, old->handle));
5301
			     __FUNCTION__, old->handle));
4974
			assert(__kgem_bo_is_mappable(kgem, old));
5302
			assert(kgem_bo_can_map(kgem, old));
4975
			assert(!old->snoop);
5303
			assert(!old->snoop);
4976
			assert(old->rq == NULL);
5304
			assert(old->rq == NULL);
Line 4977... Line 5305...
4977
 
5305
 
4978
			bo = buffer_alloc();
5306
			bo = buffer_alloc();
Line 4985... Line 5313...
4985
			assert(bo->mmapped);
5313
			assert(bo->mmapped);
4986
			assert(bo->base.refcnt == 1);
5314
			assert(bo->base.refcnt == 1);
Line 4987... Line 5315...
4987
 
5315
 
4988
			bo->mem = kgem_bo_map(kgem, &bo->base);
5316
			bo->mem = kgem_bo_map(kgem, &bo->base);
4989
			if (bo->mem) {
5317
			if (bo->mem) {
4990
				if (IS_CPU_MAP(bo->base.map))
5318
				if (bo->mem == MAP(bo->base.map__cpu))
-
 
5319
					flags &= ~KGEM_BUFFER_INPLACE;
-
 
5320
				else
4991
					flags &= ~KGEM_BUFFER_INPLACE;
5321
					bo->mmapped = MMAPPED_GTT;
4992
				goto init;
5322
				goto init;
4993
			} else {
5323
			} else {
4994
				bo->base.refcnt = 0;
5324
				bo->base.refcnt = 0;
4995
				kgem_bo_free(kgem, &bo->base);
5325
				kgem_bo_free(kgem, &bo->base);
Line 5105... Line 5435...
5105
	assert(bo->base.refcnt == 1);
5435
	assert(bo->base.refcnt == 1);
5106
	assert(num_pages(&bo->base) >= NUM_PAGES(size));
5436
	assert(num_pages(&bo->base) >= NUM_PAGES(size));
5107
	assert(!bo->need_io || !bo->base.needs_flush);
5437
	assert(!bo->need_io || !bo->base.needs_flush);
5108
	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
5438
	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
5109
	assert(bo->mem);
5439
	assert(bo->mem);
-
 
5440
	assert(bo->mmapped != MMAPPED_GTT || MAP(bo->base.map__gtt) == bo->mem);
5110
	assert(!bo->mmapped || bo->base.map != NULL);
5441
	assert(bo->mmapped != MMAPPED_CPU || MAP(bo->base.map__cpu) == bo->mem);
Line 5111... Line 5442...
5111
 
5442
 
5112
	bo->used = size;
5443
	bo->used = size;
5113
	bo->write = flags & KGEM_BUFFER_WRITE_INPLACE;
5444
	bo->write = flags & KGEM_BUFFER_WRITE_INPLACE;
Line 5119... Line 5450...
5119
	DBG(("%s(pages=%d [%d]) new handle=%d, used=%d, write=%d\n",
5450
	DBG(("%s(pages=%d [%d]) new handle=%d, used=%d, write=%d\n",
5120
	     __FUNCTION__, num_pages(&bo->base), alloc, bo->base.handle, bo->used, bo->write));
5451
	     __FUNCTION__, num_pages(&bo->base), alloc, bo->base.handle, bo->used, bo->write));
Line 5121... Line 5452...
5121
 
5452
 
5122
done:
5453
done:
-
 
5454
	bo->used = ALIGN(bo->used, UPLOAD_ALIGNMENT);
5123
	bo->used = ALIGN(bo->used, UPLOAD_ALIGNMENT);
5455
	assert(bo->used && bo->used <= bytes(&bo->base));
5124
	assert(bo->mem);
5456
	assert(bo->mem);
5125
	*ret = (char *)bo->mem + offset;
5457
	*ret = (char *)bo->mem + offset;
5126
	return kgem_create_proxy(kgem, &bo->base, offset, size);
5458
	return kgem_create_proxy(kgem, &bo->base, offset, size);
Line 5175... Line 5507...
5175
			io->used = min;
5507
			io->used = min;
5176
		}
5508
		}
5177
		bo->size.bytes -= stride;
5509
		bo->size.bytes -= stride;
5178
	}
5510
	}
Line 5179... Line 5511...
5179
 
5511
 
5180
	bo->map = MAKE_CPU_MAP(*ret);
5512
	bo->map__cpu = *ret;
5181
	bo->pitch = stride;
5513
	bo->pitch = stride;
5182
	bo->unique_id = kgem_get_unique_id(kgem);
5514
	bo->unique_id = kgem_get_unique_id(kgem);
5183
	return bo;
5515
	return bo;
Line 5220... Line 5552...
5220
 
5552
 
5221
void kgem_proxy_bo_attach(struct kgem_bo *bo,
5553
void kgem_proxy_bo_attach(struct kgem_bo *bo,
5222
			  struct kgem_bo **ptr)
5554
			  struct kgem_bo **ptr)
5223
{
5555
{
5224
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
5556
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
5225
	assert(bo->map == NULL || IS_CPU_MAP(bo->map));
5557
	assert(bo->map__gtt == NULL);
5226
	assert(bo->proxy);
5558
	assert(bo->proxy);
5227
	list_add(&bo->vma, &bo->proxy->vma);
5559
	list_add(&bo->vma, &bo->proxy->vma);
5228
	bo->map = ptr;
5560
	bo->map__gtt = ptr;
5229
	*ptr = kgem_bo_reference(bo);
5561
	*ptr = kgem_bo_reference(bo);
Line 5230... Line 5562...
5230
}
5562
}
5231
 
5563
 
Line 5256... Line 5588...
5256
		     __FUNCTION__,
5588
		     __FUNCTION__,
5257
		     bo->base.needs_flush,
5589
		     bo->base.needs_flush,
5258
		     bo->base.domain,
5590
		     bo->base.domain,
5259
		     __kgem_busy(kgem, bo->base.handle)));
5591
		     __kgem_busy(kgem, bo->base.handle)));
Line 5260... Line 5592...
5260
 
5592
 
Line 5261... Line 5593...
5261
		assert(!IS_CPU_MAP(bo->base.map) || bo->base.snoop || kgem->has_llc);
5593
		assert(bo->mmapped == MMAPPED_GTT || bo->base.snoop || kgem->has_llc);
5262
 
5594
 
5263
		VG_CLEAR(set_domain);
5595
		VG_CLEAR(set_domain);
5264
		set_domain.handle = bo->base.handle;
5596
		set_domain.handle = bo->base.handle;
5265
		set_domain.write_domain = 0;
5597
		set_domain.write_domain = 0;
Line 5266... Line 5598...
5266
		set_domain.read_domains =
5598
		set_domain.read_domains =
5267
			IS_CPU_MAP(bo->base.map) ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
5599
			bo->mmapped == MMAPPED_CPU ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
5268
 
5600
 
5269
		if (drmIoctl(kgem->fd,
5601
		if (drmIoctl(kgem->fd,