Diff between Rev 3277 and Rev 3480
@@ -61 +61 @@
     memcpy(to, from, n);
     return 0;
 }
 
-struct eb_objects {
-    int and;
-    struct hlist_head buckets[0];
-};
+struct eb_objects {
+    struct list_head objects;
+    int and;
+    union {
+        struct drm_i915_gem_object *lut[0];
+        struct hlist_head buckets[0];
+    };
+};
 
 static struct eb_objects *
-eb_create(int size)
+eb_create(struct drm_i915_gem_execbuffer2 *args)
 {
-    struct eb_objects *eb;
-    int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
-    BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
-    while (count > size)
-        count >>= 1;
-    eb = kzalloc(count*sizeof(struct hlist_head) +
-             sizeof(struct eb_objects),
-             GFP_KERNEL);
-    if (eb == NULL)
-        return eb;
-
-    eb->and = count - 1;
+    struct eb_objects *eb = NULL;
+
+    if (args->flags & I915_EXEC_HANDLE_LUT) {
+        int size = args->buffer_count;
+        size *= sizeof(struct drm_i915_gem_object *);
+        size += sizeof(struct eb_objects);
+        eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+    }
+
+    if (eb == NULL) {
+        int size = args->buffer_count;
+        int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+        BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
+        while (count > 2*size)
+            count >>= 1;
+        eb = kzalloc(count*sizeof(struct hlist_head) +
+                 sizeof(struct eb_objects),
+                 GFP_TEMPORARY);
+        if (eb == NULL)
+            return eb;
+
+        eb->and = count - 1;
+    } else
+        eb->and = -args->buffer_count;
+
+    INIT_LIST_HEAD(&eb->objects);
     return eb;
 }
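A note on the rewritten eb_create() above: eb->and now does double duty as mode flag and size, which is what lets the rest of the code pick a lookup path with a single sign test. A minimal standalone sketch of the encoding (a simplified illustration, not driver code; is_lut_mode() is a made-up helper):

    #include <stdio.h>

    /* Sketch: one int encodes both the lookup mode and its size/mask.
     *   and >= 0 -> hash buckets; 'and' is the bucket mask (count - 1).
     *   and <  0 -> direct LUT; '-and' is the number of slots, usable
     *               because I915_EXEC_HANDLE_LUT makes the execbuffer
     *               index itself the handle. */
    static int is_lut_mode(int and) { return and < 0; }

    int main(void)
    {
        int and_hash = 16 - 1; /* 16 buckets -> mask 0xf */
        int and_lut = -8;      /* LUT with 8 slots */

        printf("hash: mask=%#x lut=%d\n", and_hash, is_lut_mode(and_hash));
        printf("lut:  size=%d lut=%d\n", -and_lut, is_lut_mode(and_lut));
        return 0;
    }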
@@ -87 +105 @@
 
 static void
 eb_reset(struct eb_objects *eb)
 {
-    memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
-}
-
-static void
-eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
-{
-    hlist_add_head(&obj->exec_node,
-               &eb->buckets[obj->exec_handle & eb->and]);
-}
-
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
-{
-    struct hlist_head *head;
-    struct hlist_node *node;
-    struct drm_i915_gem_object *obj;
-
-    head = &eb->buckets[handle & eb->and];
-    hlist_for_each(node, head) {
-        obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-        if (obj->exec_handle == handle)
-            return obj;
-    }
-
-    return NULL;
-}
+    if (eb->and >= 0)
+        memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static int
+eb_lookup_objects(struct eb_objects *eb,
+          struct drm_i915_gem_exec_object2 *exec,
+          const struct drm_i915_gem_execbuffer2 *args,
+          struct drm_file *file)
+{
+    int i;
+
+    spin_lock(&file->table_lock);
+    for (i = 0; i < args->buffer_count; i++) {
+        struct drm_i915_gem_object *obj;
+
+        if(exec[i].handle == -2)
+            obj = get_fb_obj();
+        else
+            obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
+        if (obj == NULL) {
+            spin_unlock(&file->table_lock);
+            DRM_DEBUG("Invalid object handle %d at index %d\n",
+                   exec[i].handle, i);
+            return -ENOENT;
+        }
+
+        if (!list_empty(&obj->exec_list)) {
+            spin_unlock(&file->table_lock);
+            DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+                   obj, exec[i].handle, i);
+            return -EINVAL;
+        }
+
+        drm_gem_object_reference(&obj->base);
+        list_add_tail(&obj->exec_list, &eb->objects);
+
+        obj->exec_entry = &exec[i];
+        if (eb->and < 0) {
+            eb->lut[i] = obj;
+        } else {
+            uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
+            obj->exec_handle = handle;
+            hlist_add_head(&obj->exec_node,
+                       &eb->buckets[handle & eb->and]);
+        }
+    }
+    spin_unlock(&file->table_lock);
+
+    return 0;
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+    if (eb->and < 0) {
+        if (handle >= -eb->and)
+            return NULL;
+        return eb->lut[handle];
+    } else {
+        struct hlist_head *head;
+        struct hlist_node *node;
+
+        head = &eb->buckets[handle & eb->and];
+        hlist_for_each(node, head) {
+            struct drm_i915_gem_object *obj;
+
+            obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+            if (obj->exec_handle == handle)
+                return obj;
+        }
+        return NULL;
+    }
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+    while (!list_empty(&eb->objects)) {
+        struct drm_i915_gem_object *obj;
+
+        obj = list_first_entry(&eb->objects,
+                       struct drm_i915_gem_object,
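The new eb_lookup_objects() above also changes the locking pattern: rather than calling drm_gem_object_lookup() per buffer (each call taking and dropping the file's table lock), it holds file->table_lock once across the whole exec list and resolves handles with idr_find(). A rough sketch of that batching pattern, under the assumptions noted in the comments (pthread mutex standing in for the kernel spinlock; the table layout is invented for the example):

    #include <pthread.h>
    #include <stddef.h>

    struct table {
        pthread_mutex_t lock;
        void *slots[64];
    };

    /* no per-call locking: the caller holds t->lock for the whole batch */
    static void *lookup_one(struct table *t, unsigned handle)
    {
        return handle < 64 ? t->slots[handle] : NULL;
    }

    int lookup_all(struct table *t, const unsigned *handles, void **out, int n)
    {
        int i, ret = 0;

        pthread_mutex_lock(&t->lock); /* one lock round-trip for n lookups */
        for (i = 0; i < n; i++) {
            out[i] = lookup_one(t, handles[i]);
            if (out[i] == NULL) {
                ret = -1; /* -ENOENT in the driver */
                break;
            }
        }
        pthread_mutex_unlock(&t->lock);
        return ret;
    }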
@@ -177 +252 @@
               (int) reloc->offset,
               reloc->read_domains,
               reloc->write_domain);
         return ret;
     }
-    if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
-             reloc->write_domain != target_obj->pending_write_domain)) {
-        DRM_DEBUG("Write domain conflict: "
-              "obj %p target %d offset %d "
-              "new %08x old %08x\n",
-              obj, reloc->target_handle,
-              (int) reloc->offset,
-              reloc->write_domain,
-              target_obj->pending_write_domain);
-        return ret;
-    }
 
     target_obj->pending_read_domains |= reloc->read_domains;
     target_obj->pending_write_domain |= reloc->write_domain;
@@ -216 +280 @@
               (int) reloc->offset);
         return ret;
     }
 
     /* We can't wait for rendering with pagefaults disabled */
-//  if (obj->active && in_atomic())
-//      return -EFAULT;
-
 
     reloc->delta += target_offset;
     if (use_cpu_reloc(obj)) {
@@ -322 +383 @@
     return 0;
 }
 
 static int
 i915_gem_execbuffer_relocate(struct drm_device *dev,
-                 struct eb_objects *eb,
-                 struct list_head *objects)
+                 struct eb_objects *eb)
 {
     struct drm_i915_gem_object *obj;
     int ret = 0;
@@ -336 +396 @@
      * fault handler would call i915_gem_fault() and we would try to
      * acquire the struct mutex again. Obviously this is bad and so
      * lockdep complains vehemently.
      */
 //  pagefault_disable();
-    list_for_each_entry(obj, objects, exec_list) {
+    list_for_each_entry(obj, &eb->objects, exec_list) {
         ret = i915_gem_execbuffer_relocate_object(obj, eb);
         if (ret)
             break;
     }
 //  pagefault_enable();
@@ -358 +418 @@
     return entry->relocation_count && !use_cpu_reloc(obj);
 }
 
 static int
 i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-                   struct intel_ring_buffer *ring)
+                   struct intel_ring_buffer *ring,
+                   bool *need_reloc)
 {
     struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
     struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
     bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
@@ -407 +468 @@
                         obj, obj->cache_level);
 
         obj->has_aliasing_ppgtt_mapping = 1;
     }
 
-    entry->offset = obj->gtt_offset;
-//  LEAVE();
+    if (entry->offset != obj->gtt_offset) {
+        entry->offset = obj->gtt_offset;
+        *need_reloc = true;
+    }
+
+    if (entry->flags & EXEC_OBJECT_WRITE) {
+        obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
+        obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
+    }
+
+    if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
+        !obj->has_global_gtt_mapping)
+        i915_gem_gtt_bind_object(obj, obj->cache_level);
 
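The *need_reloc out-parameter added above is the kernel half of the I915_EXEC_NO_RELOC contract: if reservation leaves every buffer at the offset userspace presumed, the relocation walk can be skipped entirely. A hedged sketch of that control flow (reserve() is a hypothetical stand-in for the driver function; the flag value mirrors I915_EXEC_NO_RELOC from i915_drm.h of this era):

    #include <stdbool.h>
    #include <stdio.h>

    #define EXEC_NO_RELOC (1u << 11) /* value of I915_EXEC_NO_RELOC */

    /* Stand-in for i915_gem_execbuffer_reserve(): the driver flips
     * *need_relocs back to true for any object that had to move. */
    static int reserve(bool *need_relocs)
    {
        bool buffer_moved = true; /* pretend one object changed offset */

        if (buffer_moved)
            *need_relocs = true;
        return 0;
    }

    int main(void)
    {
        unsigned flags = EXEC_NO_RELOC; /* userspace claims offsets are current */
        bool need_relocs = (flags & EXEC_NO_RELOC) == 0;

        reserve(&need_relocs);
        printf("run relocation pass: %s\n", need_relocs ? "yes" : "no");
        return 0;
    }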
@@ -435 +507 @@
 }
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                 struct drm_file *file,
-                struct list_head *objects)
+                struct list_head *objects,
+                bool *need_relocs)
 {
     struct drm_i915_gem_object *obj;
     struct list_head ordered_objects;
     bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
@@ -465 +538 @@
         if (need_mappable)
             list_move(&obj->exec_list, &ordered_objects);
         else
             list_move_tail(&obj->exec_list, &ordered_objects);
 
-        obj->base.pending_read_domains = 0;
+        obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
         obj->base.pending_write_domain = 0;
         obj->pending_fenced_gpu_access = false;
     }
@@ -505 +578 @@
 
         if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
             (need_mappable && !obj->map_and_fenceable))
             ret = i915_gem_object_unbind(obj);
         else
-            ret = i915_gem_execbuffer_reserve_object(obj, ring);
+            ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
         if (ret)
             goto err;
     }
 
     /* Bind fresh objects */
     list_for_each_entry(obj, objects, exec_list) {
         if (obj->gtt_space)
             continue;
 
-        ret = i915_gem_execbuffer_reserve_object(obj, ring);
+        ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
         if (ret)
@@ -538 +611 @@
     } while (1);
 }
 
 static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+                  struct drm_i915_gem_execbuffer2 *args,
                   struct drm_file *file,
                   struct intel_ring_buffer *ring,
-                  struct list_head *objects,
                   struct eb_objects *eb,
-                  struct drm_i915_gem_exec_object2 *exec,
-                  int count)
+                  struct drm_i915_gem_exec_object2 *exec)
 {
     struct drm_i915_gem_relocation_entry *reloc;
     struct drm_i915_gem_object *obj;
+    bool need_relocs;
     int *reloc_offset;
     int i, total, ret;
+    int count = args->buffer_count;
 
     /* We may process another execbuffer during the unlock... */
-    while (!list_empty(objects)) {
-        obj = list_first_entry(objects,
+    while (!list_empty(&eb->objects)) {
+        obj = list_first_entry(&eb->objects,
                        struct drm_i915_gem_object,
                        exec_list);
         list_del_init(&obj->exec_list);
634 | list_del_init(&obj->exec_list); |
Line 620... | Line 694... | ||
620 | goto err; |
694 | goto err; |
621 | } |
695 | } |
Line 622... | Line 696... | ||
622 | 696 | ||
623 | /* reacquire the objects */ |
697 | /* reacquire the objects */ |
624 | eb_reset(eb); |
- | |
625 | for (i = 0; i < count; i++) { |
- | |
626 | - | ||
627 | if(exec[i].handle == -2) |
- | |
628 | { |
- | |
629 | obj = get_fb_obj(); |
698 | eb_reset(eb); |
630 | drm_gem_object_reference(&obj->base); |
699 | ret = eb_lookup_objects(eb, exec, args, file); |
631 | } |
- | |
632 | else |
- | |
633 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
- | |
634 | exec[i].handle)); |
- | |
635 | if (&obj->base == NULL) { |
- | |
636 | DRM_DEBUG("Invalid object handle %d at index %d\n", |
- | |
637 | exec[i].handle, i); |
- | |
638 | ret = -ENOENT; |
700 | if (ret) |
639 | goto err; |
- | |
Line 640... | Line 701... | ||
640 | } |
701 | goto err; |
641 | - | ||
642 | list_add_tail(&obj->exec_list, objects); |
- | |
643 | obj->exec_handle = exec[i].handle; |
- | |
644 | obj->exec_entry = &exec[i]; |
- | |
645 | eb_add_object(eb, obj); |
- | |
646 | } |
702 | |
647 | 703 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
|
648 | ret = i915_gem_execbuffer_reserve(ring, file, objects); |
704 | ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); |
Line 649... | Line 705... | ||
649 | if (ret) |
705 | if (ret) |
650 | goto err; |
706 | goto err; |
651 | 707 | ||
652 | list_for_each_entry(obj, objects, exec_list) { |
708 | list_for_each_entry(obj, &eb->objects, exec_list) { |
653 | int offset = obj->exec_entry - exec; |
709 | int offset = obj->exec_entry - exec; |
654 | ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, |
710 | ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, |
Line 668... | Line 724... | ||
668 | kfree(reloc_offset); |
724 | kfree(reloc_offset); |
669 | return ret; |
725 | return ret; |
670 | } |
726 | } |
Line 671... | Line 727... | ||
671 | 727 | ||
672 | static int |
- | |
673 | i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) |
- | |
674 | { |
- | |
675 | u32 plane, flip_mask; |
- | |
676 | int ret; |
- | |
677 | - | ||
678 | /* Check for any pending flips. As we only maintain a flip queue depth |
- | |
679 | * of 1, we can simply insert a WAIT for the next display flip prior |
- | |
680 | * to executing the batch and avoid stalling the CPU. |
- | |
681 | */ |
- | |
682 | - | ||
683 | for (plane = 0; flips >> plane; plane++) { |
- | |
684 | if (((flips >> plane) & 1) == 0) |
- | |
685 | continue; |
- | |
686 | - | ||
687 | if (plane) |
- | |
688 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; |
- | |
689 | else |
- | |
690 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; |
- | |
691 | - | ||
692 | ret = intel_ring_begin(ring, 2); |
- | |
693 | if (ret) |
- | |
694 | return ret; |
- | |
695 | - | ||
696 | intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); |
- | |
697 | intel_ring_emit(ring, MI_NOOP); |
- | |
698 | intel_ring_advance(ring); |
- | |
699 | } |
- | |
700 | - | ||
701 | return 0; |
- | |
702 | } |
- | |
703 | - | ||
704 | static int |
728 | static int |
705 | i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, |
729 | i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, |
706 | struct list_head *objects) |
730 | struct list_head *objects) |
707 | { |
731 | { |
708 | struct drm_i915_gem_object *obj; |
732 | struct drm_i915_gem_object *obj; |
709 | uint32_t flush_domains = 0; |
- | |
710 | uint32_t flips = 0; |
733 | uint32_t flush_domains = 0; |
Line 711... | Line 734... | ||
711 | int ret; |
734 | int ret; |
712 | 735 | ||
713 | list_for_each_entry(obj, objects, exec_list) { |
736 | list_for_each_entry(obj, objects, exec_list) { |
714 | ret = i915_gem_object_sync(obj, ring); |
737 | ret = i915_gem_object_sync(obj, ring); |
Line 715... | Line 738... | ||
715 | if (ret) |
738 | if (ret) |
716 | return ret; |
739 | return ret; |
Line 717... | Line -... | ||
717 | - | ||
718 | if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) |
- | |
719 | i915_gem_clflush_object(obj); |
- | |
720 | 740 | ||
721 | if (obj->base.pending_write_domain) |
741 | if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) |
Line 722... | Line -... | ||
722 | flips |= atomic_read(&obj->pending_flip); |
- | |
723 | - | ||
724 | flush_domains |= obj->base.write_domain; |
- | |
725 | } |
- | |
726 | - | ||
727 | if (flips) { |
- | |
728 | ret = i915_gem_execbuffer_wait_for_flips(ring, flips); |
742 | i915_gem_clflush_object(obj); |
729 | if (ret) |
743 | |
Line 730... | Line 744... | ||
730 | return ret; |
744 | flush_domains |= obj->base.write_domain; |
731 | } |
745 | } |
Line 743... | Line 757... | ||
743 | } |
757 | } |
Line 744... | Line 758... | ||
744 | 758 | ||
745 | static bool |
759 | static bool |
746 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) |
760 | i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) |
- | 761 | { |
|
- | 762 | if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) |
|
- | 763 | return false; |
|
747 | { |
764 | |
748 | return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; |
765 | return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; |
Line 749... | Line 766... | ||
749 | } |
766 | } |
750 | 767 | ||
751 | static int |
768 | static int |
752 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, |
769 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, |
753 | int count) |
770 | int count) |
- | 771 | { |
|
- | 772 | int i; |
|
Line 754... | Line 773... | ||
754 | { |
773 | int relocs_total = 0; |
755 | int i; |
774 | int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); |
756 | 775 | ||
Line -... | Line 776... | ||
- | 776 | for (i = 0; i < count; i++) { |
|
- | 777 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; |
|
- | 778 | int length; /* limited by fault_in_pages_readable() */ |
|
757 | for (i = 0; i < count; i++) { |
779 | |
- | 780 | if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) |
|
758 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; |
781 | return -EINVAL; |
- | 782 | ||
759 | int length; /* limited by fault_in_pages_readable() */ |
783 | /* First check for malicious input causing overflow in |
760 | 784 | * the worst case where we need to allocate the entire |
|
- | 785 | * relocation tree as a single array. |
|
Line 761... | Line 786... | ||
761 | /* First check for malicious input causing overflow */ |
786 | */ |
762 | if (exec[i].relocation_count > |
787 | if (exec[i].relocation_count > relocs_max - relocs_total) |
763 | INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) |
- | |
764 | return -EINVAL; |
- | |
765 | - | ||
766 | length = exec[i].relocation_count * |
788 | return -EINVAL; |
767 | sizeof(struct drm_i915_gem_relocation_entry); |
789 | relocs_total += exec[i].relocation_count; |
768 | // if (!access_ok(VERIFY_READ, ptr, length)) |
790 | |
Line 769... | Line 791... | ||
769 | // return -EFAULT; |
791 | length = exec[i].relocation_count * |
Line 787... | Line 809... | ||
787 | 809 | ||
788 | list_for_each_entry(obj, objects, exec_list) { |
810 | list_for_each_entry(obj, objects, exec_list) { |
789 | u32 old_read = obj->base.read_domains; |
811 | u32 old_read = obj->base.read_domains; |
Line 790... | Line -... | ||
790 | u32 old_write = obj->base.write_domain; |
- | |
791 | 812 | u32 old_write = obj->base.write_domain; |
|
- | 813 | ||
- | 814 | obj->base.write_domain = obj->base.pending_write_domain; |
|
- | 815 | if (obj->base.write_domain == 0) |
|
792 | obj->base.read_domains = obj->base.pending_read_domains; |
816 | obj->base.pending_read_domains |= obj->base.read_domains; |
Line 793... | Line 817... | ||
793 | obj->base.write_domain = obj->base.pending_write_domain; |
817 | obj->base.read_domains = obj->base.pending_read_domains; |
794 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; |
818 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; |
795 | 819 | ||
Line 847... | Line 871... | ||
847 | struct drm_file *file, |
871 | struct drm_file *file, |
848 | struct drm_i915_gem_execbuffer2 *args, |
872 | struct drm_i915_gem_execbuffer2 *args, |
849 | struct drm_i915_gem_exec_object2 *exec) |
873 | struct drm_i915_gem_exec_object2 *exec) |
850 | { |
874 | { |
851 | drm_i915_private_t *dev_priv = dev->dev_private; |
875 | drm_i915_private_t *dev_priv = dev->dev_private; |
852 | struct list_head objects; |
- | |
853 | struct eb_objects *eb; |
876 | struct eb_objects *eb; |
854 | struct drm_i915_gem_object *batch_obj; |
877 | struct drm_i915_gem_object *batch_obj; |
855 | struct drm_clip_rect *cliprects = NULL; |
878 | struct drm_clip_rect *cliprects = NULL; |
856 | struct intel_ring_buffer *ring; |
879 | struct intel_ring_buffer *ring; |
857 | u32 ctx_id = i915_execbuffer2_get_context_id(*args); |
880 | u32 ctx_id = i915_execbuffer2_get_context_id(*args); |
858 | u32 exec_start, exec_len; |
881 | u32 exec_start, exec_len; |
859 | u32 mask; |
- | |
860 | u32 flags; |
882 | u32 mask, flags; |
861 | int ret, mode, i; |
883 | int ret, mode, i; |
- | 884 | bool need_relocs; |
|
Line 862... | Line 885... | ||
862 | 885 | ||
863 | if (!i915_gem_check_execbuffer(args)) { |
- | |
- | 886 | if (!i915_gem_check_execbuffer(args)) |
|
864 | DRM_DEBUG("execbuf with invalid offset/length\n"); |
887 | { |
865 | FAIL(); |
888 | FAIL(); |
866 | return -EINVAL; |
889 | return -EINVAL; |
Line 867... | Line 890... | ||
867 | } |
890 | } |
Line 873... | Line 896... | ||
873 | return ret; |
896 | return ret; |
874 | }; |
897 | }; |
Line 875... | Line 898... | ||
875 | 898 | ||
876 | flags = 0; |
899 | flags = 0; |
877 | if (args->flags & I915_EXEC_SECURE) { |
- | |
878 | // if (!file->is_master || !capable(CAP_SYS_ADMIN)) |
- | |
Line 879... | Line 900... | ||
879 | // return -EPERM; |
900 | if (args->flags & I915_EXEC_SECURE) { |
880 | 901 | ||
881 | flags |= I915_DISPATCH_SECURE; |
902 | flags |= I915_DISPATCH_SECURE; |
882 | } |
903 | } |
Line 987... | Line 1008... | ||
987 | mutex_unlock(&dev->struct_mutex); |
1008 | mutex_unlock(&dev->struct_mutex); |
988 | ret = -EBUSY; |
1009 | ret = -EBUSY; |
989 | goto pre_mutex_err; |
1010 | goto pre_mutex_err; |
990 | } |
1011 | } |
Line 991... | Line 1012... | ||
991 | 1012 | ||
992 | eb = eb_create(args->buffer_count); |
1013 | eb = eb_create(args); |
993 | if (eb == NULL) { |
1014 | if (eb == NULL) { |
994 | mutex_unlock(&dev->struct_mutex); |
1015 | mutex_unlock(&dev->struct_mutex); |
995 | ret = -ENOMEM; |
1016 | ret = -ENOMEM; |
996 | goto pre_mutex_err; |
1017 | goto pre_mutex_err; |
Line 997... | Line 1018... | ||
997 | } |
1018 | } |
998 | - | ||
999 | /* Look up object handles */ |
- | |
1000 | INIT_LIST_HEAD(&objects); |
- | |
1001 | for (i = 0; i < args->buffer_count; i++) { |
- | |
1002 | struct drm_i915_gem_object *obj; |
- | |
1003 | - | ||
1004 | if(exec[i].handle == -2) |
- | |
1005 | { |
1019 | |
1006 | obj = get_fb_obj(); |
1020 | /* Look up object handles */ |
1007 | drm_gem_object_reference(&obj->base); |
- | |
1008 | } |
- | |
1009 | else |
- | |
1010 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
- | |
1011 | exec[i].handle)); |
- | |
1012 | - | ||
1013 | // printf("%s object %p handle %d\n", __FUNCTION__, obj, exec[i].handle); |
- | |
1014 | - | ||
1015 | if (&obj->base == NULL) { |
- | |
1016 | DRM_DEBUG("Invalid object handle %d at index %d\n", |
- | |
1017 | exec[i].handle, i); |
- | |
1018 | /* prevent error path from reading uninitialized data */ |
- | |
1019 | ret = -ENOENT; |
- | |
1020 | goto err; |
- | |
1021 | } |
- | |
1022 | - | ||
1023 | if (!list_empty(&obj->exec_list)) { |
- | |
1024 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", |
- | |
1025 | obj, exec[i].handle, i); |
1021 | ret = eb_lookup_objects(eb, exec, args, file); |
1026 | ret = -EINVAL; |
- | |
1027 | goto err; |
- | |
1028 | } |
- | |
1029 | - | ||
1030 | list_add_tail(&obj->exec_list, &objects); |
- | |
1031 | obj->exec_handle = exec[i].handle; |
- | |
1032 | obj->exec_entry = &exec[i]; |
- | |
Line 1033... | Line 1022... | ||
1033 | eb_add_object(eb, obj); |
1022 | if (ret) |
1034 | } |
1023 | goto err; |
1035 | 1024 | ||
1036 | /* take note of the batch buffer before we might reorder the lists */ |
1025 | /* take note of the batch buffer before we might reorder the lists */ |
Line 1037... | Line 1026... | ||
1037 | batch_obj = list_entry(objects.prev, |
1026 | batch_obj = list_entry(eb->objects.prev, |
- | 1027 | struct drm_i915_gem_object, |
|
1038 | struct drm_i915_gem_object, |
1028 | exec_list); |
1039 | exec_list); |
1029 | |
1040 | 1030 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
|
Line 1041... | Line 1031... | ||
1041 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
1031 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
- | 1032 | ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); |
|
1042 | ret = i915_gem_execbuffer_reserve(ring, file, &objects); |
1033 | if (ret) |
1043 | if (ret) |
1034 | goto err; |
1044 | goto err; |
1035 | |
1045 | 1036 | /* The objects are in their final locations, apply the relocations. */ |
|
1046 | /* The objects are in their final locations, apply the relocations. */ |
- | |
1047 | ret = i915_gem_execbuffer_relocate(dev, eb, &objects); |
1037 | if (need_relocs) |
1048 | if (ret) { |
- | |
1049 | if (ret == -EFAULT) { |
1038 | ret = i915_gem_execbuffer_relocate(dev, eb); |
1050 | ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, |
1039 | if (ret) { |
1051 | &objects, eb, |
1040 | if (ret == -EFAULT) { |
1052 | exec, |
1041 | ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, |
1053 | args->buffer_count); |
1042 | eb, exec); |
Line 1070... | Line 1059... | ||
1070 | * hsw should have this fixed, but let's be paranoid and do it |
1059 | * hsw should have this fixed, but let's be paranoid and do it |
1071 | * unconditionally for now. */ |
1060 | * unconditionally for now. */ |
1072 | if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) |
1061 | if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) |
1073 | i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); |
1062 | i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); |
Line 1074... | Line 1063... | ||
1074 | 1063 | ||
1075 | ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); |
1064 | ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); |
1076 | if (ret) |
1065 | if (ret) |
Line 1077... | Line 1066... | ||
1077 | goto err; |
1066 | goto err; |
1078 | 1067 | ||
Line 1102... | Line 1091... | ||
1102 | } |
1091 | } |
Line 1103... | Line 1092... | ||
1103 | 1092 | ||
1104 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; |
1093 | exec_start = batch_obj->gtt_offset + args->batch_start_offset; |
1105 | exec_len = args->batch_len; |
1094 | exec_len = args->batch_len; |
1106 | if (cliprects) { |
- | |
1107 | // for (i = 0; i < args->num_cliprects; i++) { |
- | |
1108 | // ret = i915_emit_box(dev, &cliprects[i], |
- | |
1109 | // args->DR1, args->DR4); |
- | |
1110 | // if (ret) |
- | |
1111 | // goto err; |
1095 | if (cliprects) { |
1112 | - | ||
1113 | // ret = ring->dispatch_execbuffer(ring, |
- | |
1114 | // exec_start, exec_len, |
- | |
1115 | // flags); |
- | |
1116 | // if (ret) |
- | |
1117 | // goto err; |
- | |
1118 | // } |
1096 | |
1119 | } else { |
1097 | } else { |
1120 | ret = ring->dispatch_execbuffer(ring, |
1098 | ret = ring->dispatch_execbuffer(ring, |
1121 | exec_start, exec_len, |
1099 | exec_start, exec_len, |
1122 | flags); |
1100 | flags); |
1123 | if (ret) |
1101 | if (ret) |
1124 | goto err; |
1102 | goto err; |
Line 1125... | Line 1103... | ||
1125 | } |
1103 | } |
Line 1126... | Line 1104... | ||
1126 | 1104 | ||
1127 | trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); |
1105 | trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); |
Line 1128... | Line 1106... | ||
1128 | 1106 | ||
1129 | i915_gem_execbuffer_move_to_active(&objects, ring); |
1107 | i915_gem_execbuffer_move_to_active(&eb->objects, ring); |
1130 | i915_gem_execbuffer_retire_commands(dev, file, ring); |
- | |
1131 | - | ||
1132 | err: |
- | |
1133 | eb_destroy(eb); |
- | |
1134 | while (!list_empty(&objects)) { |
- | |
1135 | struct drm_i915_gem_object *obj; |
- | |
1136 | - | ||
1137 | obj = list_first_entry(&objects, |
- | |
1138 | struct drm_i915_gem_object, |
- | |
Line 1139... | Line 1108... | ||
1139 | exec_list); |
1108 | i915_gem_execbuffer_retire_commands(dev, file, ring); |
Line 1140... | Line 1109... | ||
1140 | list_del_init(&obj->exec_list); |
1109 | |
1141 | drm_gem_object_unreference(&obj->base); |
1110 | err: |
1142 | } |
- | |
1143 | - | ||
1144 | mutex_unlock(&dev->struct_mutex); |
1111 | eb_destroy(eb); |
1145 | 1112 | ||
Line -... | Line 1113... | ||
- | 1113 | mutex_unlock(&dev->struct_mutex); |
|
- | 1114 | ||
1146 | pre_mutex_err: |
1115 | pre_mutex_err: |
1147 | kfree(cliprects); |
1116 | kfree(cliprects); |
1148 | 1117 | return ret; |
|
1149 | 1118 | } |
|
1150 | return ret; |
1119 | |
Line 1165... | Line 1134... | ||
1165 | DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); |
1134 | DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); |
1166 | FAIL(); |
1135 | FAIL(); |
1167 | return -EINVAL; |
1136 | return -EINVAL; |
1168 | } |
1137 | } |
Line 1169... | Line 1138... | ||
1169 | 1138 | ||
1170 | exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count); |
- | |
1171 | - | ||
1172 | // if (exec2_list == NULL) |
1139 | exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, |
1173 | // exec2_list = drm_malloc_ab(sizeof(*exec2_list), |
- | |
1174 | // args->buffer_count); |
1140 | GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); |
1175 | if (exec2_list == NULL) { |
1141 | if (exec2_list == NULL) { |
1176 | DRM_DEBUG("Failed to allocate exec list for %d buffers\n", |
1142 | DRM_DEBUG("Failed to allocate exec list for %d buffers\n", |
1177 | args->buffer_count); |
1143 | args->buffer_count); |
1178 | FAIL(); |
1144 | FAIL(); |