--- Rev 3263
+++ Rev 3266
@@ -265 +265 @@
 static int
 i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 				    struct eb_objects *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
-	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
+	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	int remain, ret;
 
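[note] The functional change in this hunk shrinks the on-stack relocation staging buffer from 512 to 64 bytes, so each copy pass stages fewer entries. A minimal sketch of the N_RELOC() arithmetic; the stand-in struct below mirrors the upstream 32-byte drm_i915_gem_relocation_entry layout and is an assumption for illustration, not the driver's definition:

    #include <stdio.h>

    /* Stand-in with the same 32-byte footprint as the upstream
     * struct drm_i915_gem_relocation_entry (assumed, for illustration). */
    struct reloc_entry {
        unsigned int       target_handle;
        unsigned int       delta;
        unsigned long long offset;
        unsigned long long presumed_offset;
        unsigned int       read_domains;
        unsigned int       write_domain;
    };

    #define N_RELOC(x) ((x) / sizeof(struct reloc_entry))

    int main(void)
    {
        /* 2 entries fit after the change, 16 before it. */
        printf("N_RELOC(64) = %zu, N_RELOC(512) = %zu\n",
               N_RELOC(64), N_RELOC(512));
        return 0;
    }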
@@ -365 +365 @@
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
 	int ret;
+
+//    ENTER();
 
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
 	need_mappable = need_fence || need_reloc_mappable(obj);
 
 	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
 	if (ret)
+	{
+		FAIL();
 		return ret;
+	};
 
 	entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			ret = i915_gem_object_get_fence(obj);
 			if (ret)
+			{
+				FAIL();
 				return ret;
@@ -399 +407 @@
 
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
 	entry->offset = obj->gtt_offset;
+//    LEAVE();
+
 	return 0;
 }
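[note] ENTER(), LEAVE() and FAIL() are the port's tracing helpers, not upstream i915 code; this revision threads them through the pin and fence error paths, with the ENTER/LEAVE call sites left commented out. Their real definitions live elsewhere in the tree; a hypothetical minimal sketch, purely an assumption:

    #include <stdio.h>

    /* Hypothetical definitions for illustration only; the port's
     * actual macros are defined outside this file. */
    #define ENTER() printf("enter %s\n", __FUNCTION__)
    #define LEAVE() printf("leave %s\n", __FUNCTION__)
    #define FAIL()  printf("fail %s:%d\n", __FUNCTION__, __LINE__)

    int main(void)
    {
        ENTER();
        FAIL();
        LEAVE();
        return 0;
    }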
@@ -431 +441 @@
 	struct drm_i915_gem_object *obj;
 	struct list_head ordered_objects;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
+
+//    ENTER();
 
 	INIT_LIST_HEAD(&ordered_objects);
 	while (!list_empty(objects)) {
 		struct drm_i915_gem_exec_object2 *entry;
@@ -512 +524 @@
 err:		/* Decrement pin count for bound objects */
 	list_for_each_entry(obj, objects, exec_list)
 		i915_gem_execbuffer_unreserve_object(obj);
 
 	if (ret != -ENOSPC || retry++)
+	{
+//        LEAVE();
 		return ret;
+	};
 
 //	ret = i915_gem_evict_everything(ring->dev);
 	if (ret)
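[note] The `ret != -ENOSPC || retry++` test is the whole retry policy: any error other than -ENOSPC returns at once, and -ENOSPC itself is forgiven exactly once, giving eviction a single chance before the second failure is returned. A self-contained model of the idiom; reserve_all() and evict_everything() are stand-ins, not driver functions:

    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* Fails with -ENOSPC on the first pass, succeeds after eviction. */
    static int reserve_all(void)      { return attempts == 0 ? -ENOSPC : 0; }
    static int evict_everything(void) { attempts++; return 0; }

    int main(void)
    {
        int retry = 0, ret;

        for (;;) {
            ret = reserve_all();
            if (ret != -ENOSPC || retry++)  /* one eviction attempt only */
                break;
            if ((ret = evict_everything()))
                break;
        }
        printf("ret = %d after %d retries\n", ret, retry);
        return 0;
    }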
@@ -552 +567 @@
 		total += exec[i].relocation_count;
 
 	reloc_offset = malloc(count * sizeof(*reloc_offset));
 	reloc = malloc(total * sizeof(*reloc));
 	if (reloc == NULL || reloc_offset == NULL) {
-		free(reloc);
-		free(reloc_offset);
+		kfree(reloc);
+		kfree(reloc_offset);
 		mutex_lock(&dev->struct_mutex);
 		return -ENOMEM;
 	}
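[note] The allocations above stay malloc() while the release calls become kfree(); that only balances if the port routes both through the same heap, which is assumed here rather than shown by the diff. The unconditional double release itself is safe, because freeing a NULL pointer is defined to do nothing:

    #include <stdlib.h>

    int main(void)
    {
        void *reloc        = malloc(64);
        void *reloc_offset = NULL;   /* pretend the second allocation failed */

        /* Mirrors the error path: release both without checking which
         * allocation failed; free(NULL), like kfree(NULL), is a no-op. */
        free(reloc);
        free(reloc_offset);
        return 0;
    }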
@@ -607 +622 @@
 	/* reacquire the objects */
 	eb_reset(eb);
 	for (i = 0; i < count; i++) {
 
 		if(exec[i].handle == -2)
+		{
 			obj = get_fb_obj();
+			drm_gem_object_reference(&obj->base);
+		}
 		else
 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
 					   exec[i].handle));
 		if (&obj->base == NULL) {
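[note] Handle -2 is the port's sentinel for the framebuffer object. drm_gem_object_lookup() hands back its object with a reference already taken, while get_fb_obj() apparently does not, so the added drm_gem_object_reference() keeps both branches symmetric and stops the shared unreference path from underflowing the framebuffer's refcount. A toy model of that asymmetry; every name here is a stand-in:

    #include <assert.h>

    struct obj { int refcount; };

    static struct obj fb = { 1 };

    static struct obj *lookup(void)     { fb.refcount++; return &fb; } /* takes a ref */
    static struct obj *get_fb_obj(void) { return &fb; }                /* takes none  */

    static void reference(struct obj *o)   { o->refcount++; }
    static void unreference(struct obj *o) { o->refcount--; }

    int main(void)
    {
        struct obj *a = lookup();
        unreference(a);              /* lookup path was already balanced */

        struct obj *b = get_fb_obj();
        reference(b);                /* the reference this revision adds */
        unreference(b);              /* shared cleanup stays balanced    */

        assert(fb.refcount == 1);
        return 0;
    }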
@@ -643 +661 @@
 	 * having buffers reserved in the aperture and so causing spurious
 	 * ENOSPC for random operations.
 	 */
 
 err:
-	free(reloc);
-	free(reloc_offset);
+	kfree(reloc);
+	kfree(reloc_offset);
 	return ret;
 }
 
@@ -841 +859 @@
 	u32 flags;
 	int ret, mode, i;
 
 	if (!i915_gem_check_execbuffer(args)) {
 		DRM_DEBUG("execbuf with invalid offset/length\n");
+		FAIL();
 		return -EINVAL;
 	}
 
 	ret = validate_exec_list(exec, args->buffer_count);
 	if (ret)
+	{
+		FAIL();
 		return ret;
+	};
 
 	flags = 0;
@@ -868 +890 @@
 	case I915_EXEC_BSD:
 		ring = &dev_priv->ring[VCS];
 		if (ctx_id != 0) {
 			DRM_DEBUG("Ring %s doesn't support contexts\n",
 				  ring->name);
+			FAIL();
 			return -EPERM;
 		}
 		break;
 	case I915_EXEC_BLT:
 		ring = &dev_priv->ring[BCS];
@@ -976 +999 @@
 	INIT_LIST_HEAD(&objects);
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_i915_gem_object *obj;
 
 		if(exec[i].handle == -2)
+		{
 			obj = get_fb_obj();
+			drm_gem_object_reference(&obj->base);
+		}
 		else
 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
 					   exec[i].handle));
+
+//        printf("%s object %p handle %d\n", __FUNCTION__, obj, exec[i].handle);
+
 		if (&obj->base == NULL) {
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				  exec[i].handle, i);
 			/* prevent error path from reading uninitialized data */
@@ -1092 +1121 @@
 				flags);
 		if (ret)
 			goto err;
 	}
+
+	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-//   i915_gem_execbuffer_move_to_active(&objects, ring);
-//   i915_gem_execbuffer_retire_commands(dev, file, ring);
-	ring->gpu_caches_dirty = true;
-	intel_ring_flush_all_caches(ring);
+	i915_gem_execbuffer_move_to_active(&objects, ring);
+	i915_gem_execbuffer_retire_commands(dev, file, ring);
 
 err:
 	eb_destroy(eb);
@@ -1113 +1142 @@
 
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
 	kfree(cliprects);
+
+
 	return ret;
 }
 
 int
 i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		     struct drm_file *file)
 {
 	struct drm_i915_gem_execbuffer2 *args = data;
 	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
 	int ret;
 
+//    ENTER();
+
 	if (args->buffer_count < 1 ||
 	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
 		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+		FAIL();
 		return -EINVAL;
 	}
 
-	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, 0);
-	if (exec2_list == NULL)
-		exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count);
+	exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count);
+
+//	if (exec2_list == NULL)
+//		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+//					   args->buffer_count);
 	if (exec2_list == NULL) {
 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
+		FAIL();
 		return -ENOMEM;
 	}
 	ret = copy_from_user(exec2_list,
 			     (struct drm_i915_relocation_entry __user *)
 			     (uintptr_t) args->buffers_ptr,
 			     sizeof(*exec2_list) * args->buffer_count);
 	if (ret != 0) {
 		DRM_DEBUG("copy %d exec entries failed %d\n",
 			  args->buffer_count, ret);
 		kfree(exec2_list);
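[note] The buffer_count bound in execbuffer2 doubles as an overflow guard: it rejects any count for which sizeof(*exec2_list) * args->buffer_count would wrap before reaching the allocator, which matters once the two-tier kmalloc/drm_malloc_ab fallback is reduced to a single malloc(). A self-contained illustration; the 56-byte stand-in struct is an assumption, not the real ABI size:

    #include <limits.h>
    #include <stdio.h>

    struct exec_entry { unsigned long long v[7]; };   /* stand-in, 56 bytes */

    int main(void)
    {
        unsigned int count = UINT_MAX / sizeof(struct exec_entry) + 1;

        /* count * sizeof(struct exec_entry) exceeds UINT_MAX here, so a
         * 32-bit size computation would wrap to a tiny allocation; the
         * driver refuses such counts before allocating. */
        if (count < 1 || count > UINT_MAX / sizeof(struct exec_entry))
            printf("rejected %u buffers: size arithmetic would overflow\n", count);
        return 0;
    }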
@@ -1164 +1201 @@
 			   "back to user (%d)\n",
 			   args->buffer_count, ret);
 		}
 	}
 
-	free(exec2_list);
+	kfree(exec2_list);
+
+//    LEAVE();
+
 	return ret;