i915_gem.c: diff of Rev 4560 against Rev 5060
@@ -42 +42 @@
 #define PROT_READ       0x1             /* page can be read */
 #define PROT_WRITE      0x2             /* page can be written */
 #define MAP_SHARED      0x01            /* Share changes */
+
+
+u64 nsecs_to_jiffies64(u64 n)
+{
+#if (NSEC_PER_SEC % HZ) == 0
+        /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
+        return div_u64(n, NSEC_PER_SEC / HZ);
+#elif (HZ % 512) == 0
+        /* overflow after 292 years if HZ = 1024 */
+        return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
+#else
+        /*
+         * Generic case - optimized for cases where HZ is a multiple of 3.
+         * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
+         */
+        return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
+#endif
+}
+
+unsigned long nsecs_to_jiffies(u64 n)
+{
+        return (unsigned long)nsecs_to_jiffies64(n);
+}
 
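The new nsecs_to_jiffies64() helper is plain unit conversion: nanoseconds divided by nanoseconds-per-tick, with branches chosen to keep the 64-bit math exact. A minimal userspace sketch of the common case, assuming HZ = 100 and substituting plain division for the kernel's div_u64():

```c
#include <stdint.h>
#include <stdio.h>

#define HZ           100                /* assumed tick rate for this demo */
#define NSEC_PER_SEC 1000000000ULL

/* Stand-in for the kernel's div_u64(); plain 64-bit division here. */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
        return dividend / divisor;
}

/* Mirrors the common case above: NSEC_PER_SEC % HZ == 0. */
static uint64_t nsecs_to_jiffies64(uint64_t n)
{
        return div_u64(n, NSEC_PER_SEC / HZ);
}

int main(void)
{
        /* 1.5 s at HZ = 100 is 150 ticks. */
        printf("%llu\n", (unsigned long long)nsecs_to_jiffies64(1500000000ULL));
        return 0;
}
```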
@@ -46 +69 @@
 
 struct drm_i915_gem_object *get_fb_obj();
 
@@ -58 +81 @@
 #define MAX_ERRNO 4095
 
-#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
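IS_ERR_VALUE() works because the kernel reserves the top MAX_ERRNO values of the address space for encoded errnos. A hedged, self-contained sketch of the usual ERR_PTR()/PTR_ERR() round trip built on the same macro (simplified stand-ins, not this driver's code):

```c
#include <stdio.h>

#define MAX_ERRNO 4095
#define EINVAL    22

/* unlikely() is a branch-prediction hint in the kernel; identity here. */
#define unlikely(x) (x)
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

/* Encode a negative errno in the top 4095 values of the pointer range. */
static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)  { return IS_ERR_VALUE((unsigned long)ptr); }

int main(void)
{
        void *p = ERR_PTR(-EINVAL);

        if (IS_ERR(p))
                printf("error: %ld\n", PTR_ERR(p)); /* prints -22 */
        return 0;
}
```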
@@ -63 +85 @@
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
                                                    bool force);
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                                bool readonly);
-static __must_check int
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-                           struct i915_address_space *vm,
-                           unsigned alignment,
-                           bool map_and_fenceable,
-                           bool nonblocking);
-static int i915_gem_phys_pwrite(struct drm_device *dev,
-                                struct drm_i915_gem_object *obj,
-                                struct drm_i915_gem_pwrite *args,
-                                struct drm_file *file);
+static void
+i915_gem_object_retire(struct drm_i915_gem_object *obj);
 
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                  struct drm_i915_gem_object *obj);
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                          struct drm_i915_fence_reg *fence,
                                          bool enable);
 
 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
@@ -227 +240 @@
        size_t pinned;
 
        pinned = 0;
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-               if (obj->pin_count)
+               if (i915_gem_obj_is_pinned(obj))
                        pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);
 
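The accounting loop above just sums the GTT footprint of pinned objects under the lock; only the pin test changed. A self-contained model with invented structures:

```c
#include <stdio.h>

/* Minimal model of the loop in this hunk: walk the bound objects and
 * total the GTT space of the pinned ones. Fields are illustrative. */
struct obj {
        unsigned long ggtt_size;
        int pinned;
};

int main(void)
{
        struct obj objs[] = { { 4096, 1 }, { 8192, 0 }, { 4096, 1 } };
        unsigned long pinned = 0;
        unsigned int i;

        for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++)
                if (objs[i].pinned)
                        pinned += objs[i].ggtt_size;

        printf("pinned: %lu bytes\n", pinned); /* 8192 */
        return 0;
}
```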
@@ -455 +468 @@
        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
 
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-               /* If we're not in the cpu read domain, set ourself into the gtt
-                * read domain and manually flush cachelines (if required). This
-                * optimizes for the case when the gpu will dirty the data
-                * anyway again before the next pread happens. */
-               needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-               ret = i915_gem_object_wait_rendering(obj, true);
-               if (ret)
-                       return ret;
-       }
-
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        if (ret)
                return ret;
 
-       i915_gem_object_pin_pages(obj);
-
        offset = args->offset;
@@ -502 +502 @@
                if (ret == 0)
                        goto next_page;
 
                mutex_unlock(&dev->struct_mutex);
 
-               if (likely(!i915_prefault_disable) && !prefaulted) {
+               if (likely(!i915.prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
@@ -518 +518 @@
                                        user_data, page_do_bit17_swizzling,
                                        needs_clflush);
 
                mutex_lock(&dev->struct_mutex);
 
-next_page:
-               mark_page_accessed(page);
-
                if (ret)
                        goto out;
 
+next_page:
                remain -= page_length;
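The hunk above moves the error check in front of the next_page label, so a failure in the slow path can no longer be bypassed (and mark_page_accessed() is dropped). A toy loop with the same control-flow shape, purely illustrative:

```c
#include <stdio.h>

/* Shape of the loop after this hunk: the fast path jumps to next_page
 * only when ret == 0, so the error check sits before the label and is
 * never skipped on a real failure. */
static int copy_pages(int npages)
{
        int ret = 0;
        int i;

        for (i = 0; i < npages; i++) {
                ret = (i != 2) ? 0 : -1;     /* pretend page 2 needs the slow path */
                if (ret == 0)
                        goto next_page;

                ret = 0;                     /* slow path recovers (or sets an error) */
                if (ret)
                        goto out;
next_page:
                printf("page %d done\n", i); /* per-page bookkeeping */
        }
out:
        return ret;
}

int main(void)
{
        return copy_pages(4) ? 1 : 0;
}
```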
@@ -627 +625 @@
 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                          struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_pwrite *args,
                          struct drm_file *file)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
        int page_offset, page_length, ret;
 
-       ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
+       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
        if (ret)
                goto out;
 
@@ -665 +663 @@
                if ((page_offset + remain) > PAGE_SIZE)
                        page_length = PAGE_SIZE - page_offset;
 
                MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base+page_base, PG_SW);
 
-               memcpy(dev_priv->gtt.mappable+page_offset, user_data, page_length);
+               memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
 
                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }
 
 out_unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_ggtt_unpin(obj);
@@ -777 +775 @@
                 * right away and we therefore have to clflush anyway. */
                needs_clflush_after = cpu_write_needs_clflush(obj);
                ret = i915_gem_object_wait_rendering(obj, false);
                if (ret)
                        return ret;
+
+               i915_gem_object_retire(obj);
        }
        /* Same trick applies to invalidate partially written cachelines read
         * before writing. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
                needs_clflush_before =
@@ -971 +971 @@
 
 /*
  * Compare seqno against outstanding lazy request. Emit a request if they are
  * equal.
  */
-static int
-i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
+int
+i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
 {
        int ret;
@@ -991 +991 @@
 {
//      wake_up_process((struct task_struct *)data);
 }
 
 static bool missed_irq(struct drm_i915_private *dev_priv,
-                      struct intel_ring_buffer *ring)
+                      struct intel_engine_cs *ring)
 {
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
 }
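missed_irq() is a plain bitmask probe: each engine owns one bit of missed_irq_rings, indexed by ring->id. A userspace rendition (ring ids illustrative; in i915 the render ring is id 0):

```c
#include <stdio.h>

#define RCS 0   /* render ring id, as in the i915 driver */
#define BCS 2   /* blitter ring id (illustrative) */

static int test_bit(int nr, const unsigned long *addr)
{
        return (*addr >> nr) & 1;
}

int main(void)
{
        unsigned long missed_irq_rings = 1UL << BCS;

        printf("render missed?  %d\n", test_bit(RCS, &missed_irq_rings)); /* 0 */
        printf("blitter missed? %d\n", test_bit(BCS, &missed_irq_rings)); /* 1 */
        return 0;
}
```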
@@ -1022 +1022 @@
 * inserted.
 *
 * Returns 0 if the seqno was found within the alloted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
-static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                        unsigned reset_counter,
                        bool interruptible,
-                       struct timespec *timeout,
+                       s64 *timeout,
                        struct drm_i915_file_private *file_priv)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
-       struct timespec before, now;
-       unsigned long timeout_expire, wait_time;
+       unsigned long timeout_expire;
+       s64 before, now;
        wait_queue_t __wait;
        int ret;
 
-       WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
+       WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;
 
-       timeout_expire = timeout ? GetTimerTicks() + timespec_to_jiffies_timeout(timeout) : 0;
-       wait_time = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
+       timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
 
-       if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+       if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
@@ -1080 +1081 @@
                if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
                        ret = 0;
                        break;
                }
 
-               if (timeout && time_after_eq(GetTimerTicks(), timeout_expire)) {
+               if (timeout && time_after_eq(jiffies, timeout_expire)) {
                        ret = -ETIME;
                        break;
                }
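Both the seqno test and the deadline test in this loop rely on wraparound-safe signed comparison. A small demo of the idiom; these are reconstructed shapes of i915_seqno_passed() and time_after_eq(), not code copied from this file:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Seqnos are compared via signed subtraction so the test survives
 * 32-bit wraparound. */
static bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

/* Same idea for jiffies deadlines. */
#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)

int main(void)
{
        /* 0x00000002 counts as "after" 0xfffffffe across the wrap: */
        printf("%d\n", i915_seqno_passed(2u, 0xfffffffeu));   /* 1 */
        printf("%d\n", i915_seqno_passed(0xfffffffeu, 2u));   /* 0 */

        unsigned long now = 5, deadline = 3;
        printf("%d\n", time_after_eq(now, deadline));         /* 1 */
        return 0;
}
```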
@@ -1105 +1106 @@
        DestroyEvent(__wait.evnt);
 
        if (!irq_test_in_progress)
                ring->irq_put(ring);
 
+//     finish_wait(&ring->irq_queue, &wait);
        return ret;
 }
 
 /**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
 int
-i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
+i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 {
        struct drm_device *dev = ring->dev;
@@ -1138 +1140 @@
                            interruptible, NULL, NULL);
 }
 
 static int
 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
-                                    struct intel_ring_buffer *ring)
+                                    struct intel_engine_cs *ring)
 {
-       i915_gem_retire_requests_ring(ring);
+       if (!obj->active)
+               return 0;
 
        /* Manually manage the write flush as we may have not yet
         * retired the buffer.
         *
         * Note that the last_write_seqno is always the earlier of
         * the two (read/write) seqno, so if we haved successfully waited,
         * we know we have passed the last write.
         */
        obj->last_write_seqno = 0;
        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 
@@ -1163 +1165 @@
 */
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
 {
-       struct intel_ring_buffer *ring = obj->ring;
+       struct intel_engine_cs *ring = obj->ring;
        u32 seqno;
        int ret;
 
        seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
@@ -1183 +1185 @@
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
 * as the object state may change during this call.
 */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
-                                           struct drm_file *file,
+                                           struct drm_i915_file_private *file_priv,
                                            bool readonly)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring = obj->ring;
+       struct intel_engine_cs *ring = obj->ring;
        unsigned reset_counter;
        u32 seqno;
        int ret;
@@ -1210 +1212 @@
        if (ret)
                return ret;
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
+       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
        mutex_lock(&dev->struct_mutex);
        if (ret)
                return ret;
@@ -1259 +1261 @@
 
        /* Try to flush the object off the GPU without holding the lock.
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
-       ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
+       ret = i915_gem_object_wait_rendering__nonblocking(obj,
+                                                         file->driver_priv,
+                                                         !write_domain);
        if (ret)
                goto unref;
@@ -1466 +1470 @@
                ret = -E2BIG;
                goto out;
        }
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to mmap a purgeable buffer\n");
-               ret = -EINVAL;
+               DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
+               ret = -EFAULT;
                goto out;
        }
        /* Now bind it into the GTT if needed */
-       ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
        if (ret)
                goto out;
 
@@ -1500 +1504 @@
 
        for(ptr = mem; ptr < mem + obj->base.size; ptr+= 4096, pfn+= 4096)
                MapPage(ptr, pfn, PG_SHARED|PG_UW);
 
 unpin:
-       i915_gem_object_unpin(obj);
+       i915_gem_object_unpin_pages(obj);
@@ -1536 +1540 @@
        struct drm_i915_gem_mmap_gtt *args = data;
 
        return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+       return obj->madv == I915_MADV_DONTNEED;
+}
+
 /* Immediately discard the backing storage */
 static void
 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
-//     struct inode *inode;
-
//     i915_gem_object_free_mmap_offset(obj);
 
        if (obj->base.filp == NULL)
                return;
 
        /* Our goal here is to return as much of the memory as
         * is possible back to the system as we are called from OOM.
         * To do this we must instruct the shmfs to drop all of its
         * backing pages, *now*.
         */
-//     inode = obj->base.filp->f_path.dentry->d_inode;
-//     shmem_truncate_range(inode, 0, (loff_t)-1);
-
+//     shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
        obj->madv = __I915_MADV_PURGED;
 }
 
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
+/* Try to discard unwanted pages */
+static void
+i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+{
+       struct address_space *mapping;
+
+       switch (obj->madv) {
+       case I915_MADV_DONTNEED:
+               i915_gem_object_truncate(obj);
+       case __I915_MADV_PURGED:
+               return;
+       }
+
+       if (obj->base.filp == NULL)
+               return;
@@ -1590 +1608 @@
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);
 
                page_cache_release(page);
        }
-       //DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, page_count);
-
        obj->dirty = 0;
 
        sg_free_table(obj->pages);
@@ -1619 +1635 @@
        list_del(&obj->global_list);
 
        ops->put_pages(obj);
        obj->pages = NULL;
 
-       if (i915_gem_object_is_purgeable(obj))
-               i915_gem_object_truncate(obj);
+       i915_gem_object_invalidate(obj);
@@ -1721 +1736 @@
 
        if (obj->pages)
                return 0;
 
        if (obj->madv != I915_MADV_WILLNEED) {
-               DRM_ERROR("Attempting to obtain a purgeable object\n");
-               return -EINVAL;
+               DRM_DEBUG("Attempting to obtain a purgeable object\n");
+               return -EFAULT;
        }
@@ -1735 +1750 @@
 
        list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
        return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-                              struct intel_ring_buffer *ring)
+                              struct intel_engine_cs *ring)
 {
        struct drm_device *dev = obj->base.dev;
@@ -1775 +1790 @@
                }
        }
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct intel_ring_buffer *ring)
+                            struct intel_engine_cs *ring)
 {
        list_move_tail(&vma->mm_list, &vma->vm->active_list);
        return i915_gem_object_move_to_active(vma->obj, ring);
 }
 
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
-       struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+       struct i915_address_space *vm;
+       struct i915_vma *vma;
 
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               vma = i915_gem_obj_to_vma(obj, vm);
+               if (vma && !list_empty(&vma->mm_list))
+                       list_move_tail(&vma->mm_list, &vm->inactive_list);
+       }
@@ -1809 +1828 @@
        drm_gem_object_unreference(&obj->base);
 
        WARN_ON(i915_verify_lists(dev));
 }
 
+static void
+i915_gem_object_retire(struct drm_i915_gem_object *obj)
+{
+       struct intel_engine_cs *ring = obj->ring;
+
+       if (ring == NULL)
+               return;
+
+       if (i915_seqno_passed(ring->get_seqno(ring, true),
+                             obj->last_read_seqno))
+               i915_gem_object_move_to_inactive(obj);
+}
+
 static int
 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int ret, i, j;
1828 | 1860 | ||
1829 | /* Finally reset hw state */ |
1861 | /* Finally reset hw state */ |
1830 | for_each_ring(ring, dev_priv, i) { |
1862 | for_each_ring(ring, dev_priv, i) { |
Line 1831... | Line 1863... | ||
1831 | intel_ring_init_seqno(ring, seqno); |
1863 | intel_ring_init_seqno(ring, seqno); |
1832 | 1864 | ||
1833 | for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) |
1865 | for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++) |
Line 1834... | Line 1866... | ||
1834 | ring->sync_seqno[j] = 0; |
1866 | ring->semaphore.sync_seqno[j] = 0; |
1835 | } |
1867 | } |
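The rename to ring->semaphore.sync_seqno reflects that per-ring semaphore state now lives in its own struct; the table still records, per source ring, the newest seqno already waited on, so redundant semaphore waits can be skipped (see i915_gem_object_sync() later in this diff). A hedged sketch of that bookkeeping with invented names:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 3

struct engine {
        uint32_t sync_seqno[NUM_RINGS]; /* last seqno synced, per source ring */
};

/* Only emit a semaphore wait if this seqno is newer than what the
 * destination engine has already waited for from that source. */
static bool needs_semaphore(struct engine *to, int from_idx, uint32_t seqno)
{
        return seqno > to->sync_seqno[from_idx];
}

int main(void)
{
        struct engine blitter = { .sync_seqno = { 100, 0, 0 } };

        printf("%d\n", needs_semaphore(&blitter, 0, 90));  /* 0: already covered */
        printf("%d\n", needs_semaphore(&blitter, 0, 120)); /* 1: must sync */
        return 0;
}
```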
@@ -1879 +1911 @@
 
        *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
        return 0;
 }
 
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __i915_add_request(struct intel_engine_cs *ring,
                       struct drm_file *file,
                       struct drm_i915_gem_object *obj,
                       u32 *out_seqno)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
        u32 request_ring_position, request_start;
-       int was_empty;
        int ret;
 
-       request_start = intel_ring_get_tail(ring);
+       request_start = intel_ring_get_tail(ring->buffer);
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
@@ -1911 +1942 @@
        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       request_ring_position = intel_ring_get_tail(ring);
+       request_ring_position = intel_ring_get_tail(ring->buffer);
 
        ret = ring->add_request(ring);
        if (ret)
@@ -1937 +1968 @@
         */
        request->ctx = ring->last_context;
        if (request->ctx)
                i915_gem_context_reference(request->ctx);
 
-       request->emitted_jiffies = GetTimerTicks();
-       was_empty = list_empty(&ring->request_list);
+       request->emitted_jiffies = jiffies;
        list_add_tail(&request->list, &ring->request_list);
        request->file_priv = NULL;
 
@@ -1959 +1989 @@
        ring->preallocated_lazy_request = NULL;
 
        if (!dev_priv->ums.mm_suspended) {
//             i915_queue_hangcheck(ring->dev);
 
-               if (was_empty) {
                queue_delayed_work(dev_priv->wq,
                                   &dev_priv->mm.retire_work,
                                   round_jiffies_up_relative(HZ));
                intel_mark_busy(dev_priv->dev);
-               }
        }
 
        if (out_seqno)
@@ -1986 +2014 @@
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
 }
@@ -1990 +2018 @@
 
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
-                                   struct i915_address_space *vm)
-{
-       if (acthd >= i915_gem_obj_offset(obj, vm) &&
-           acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
-               return true;
-
-       return false;
-}
-
-static bool i915_head_inside_request(const u32 acthd_unmasked,
-                                    const u32 request_start,
-                                    const u32 request_end)
-{
-       const u32 acthd = acthd_unmasked & HEAD_ADDR;
-
-       if (request_start < request_end) {
-               if (acthd >= request_start && acthd < request_end)
-                       return true;
-       } else if (request_start > request_end) {
-               if (acthd >= request_start || acthd < request_end)
-                       return true;
-       }
-
-       return false;
-}
-
-static struct i915_address_space *
-request_to_vm(struct drm_i915_gem_request *request)
-{
-       struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
-       struct i915_address_space *vm;
-
-       vm = &dev_priv->gtt.base;
-
-       return vm;
-}
-
-static bool i915_request_guilty(struct drm_i915_gem_request *request,
-                               const u32 acthd, bool *inside)
-{
-       /* There is a possibility that unmasked head address
-        * pointing inside the ring, matches the batch_obj address range.
-        * However this is extremely unlikely.
-        */
-       if (request->batch_obj) {
-               if (i915_head_inside_object(acthd, request->batch_obj,
-                                           request_to_vm(request))) {
-                       *inside = true;
-                       return true;
-               }
-       }
-
-       if (i915_head_inside_request(acthd, request->head, request->tail)) {
-               *inside = false;
-               return true;
-       }
-
-       return false;
-}
-
-static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
-{
-       const unsigned long elapsed = GetTimerTicks()/100 - hs->guilty_ts;
-
-       if (hs->banned)
-               return true;
-
-       if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
-               DRM_ERROR("context hanging too fast, declaring banned!\n");
-               return true;
-       }
-
-       return false;
-}
-
-static void i915_set_reset_status(struct intel_ring_buffer *ring,
-                                 struct drm_i915_gem_request *request,
-                                 u32 acthd)
-{
-       struct i915_ctx_hang_stats *hs = NULL;
-       bool inside, guilty;
-       unsigned long offset = 0;
-
-       /* Innocent until proven guilty */
-       guilty = false;
-
-       if (request->batch_obj)
-               offset = i915_gem_obj_offset(request->batch_obj,
-                                            request_to_vm(request));
-
-       if (ring->hangcheck.action != HANGCHECK_WAIT &&
-           i915_request_guilty(request, acthd, &inside)) {
-               DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
-                         ring->name,
-                         inside ? "inside" : "flushing",
-                         offset,
-                         request->ctx ? request->ctx->id : 0,
-                         acthd);
-
-               guilty = true;
-       }
-
-       /* If contexts are disabled or this is the default context, use
-        * file_priv->reset_state
-        */
-       if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
-               hs = &request->ctx->hang_stats;
-       else if (request->file_priv)
-               hs = &request->file_priv->hang_stats;
-
-       if (hs) {
-               if (guilty) {
-                       hs->banned = i915_context_is_banned(hs);
-                       hs->batch_active++;
-                       hs->guilty_ts = GetTimerTicks()/100;
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+                                  const struct intel_context *ctx)
+{
+       unsigned long elapsed;
+
+       elapsed = GetTimerTicks()/100 - ctx->hang_stats.guilty_ts;
+
+       if (ctx->hang_stats.banned)
+               return true;
+
+       if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+               if (!i915_gem_context_is_default(ctx)) {
+                       DRM_DEBUG("context hanging too fast, banning!\n");
+                       return true;
+               } else if (i915_stop_ring_allow_ban(dev_priv)) {
+                       if (i915_stop_ring_allow_warn(dev_priv))
+                               DRM_ERROR("gpu hanging too fast, banning!\n");
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+static void i915_set_reset_status(struct drm_i915_private *dev_priv,
+                                 struct intel_context *ctx,
+                                 const bool guilty)
+{
+       struct i915_ctx_hang_stats *hs;
+
+       if (WARN_ON(!ctx))
+               return;
+
+       hs = &ctx->hang_stats;
+
+       if (guilty) {
+               hs->banned = i915_context_is_banned(dev_priv, ctx);
+               hs->batch_active++;
+               hs->guilty_ts = GetTimerTicks()/100;
@@ -2119 +2069 @@
        i915_gem_context_unreference(request->ctx);
 
        kfree(request);
 }
 
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-                                      struct intel_ring_buffer *ring)
-{
-       u32 completed_seqno = ring->get_seqno(ring, false);
-       u32 acthd = intel_ring_get_active_head(ring);
-       struct drm_i915_gem_request *request;
-
-       list_for_each_entry(request, &ring->request_list, list) {
-               if (i915_seqno_passed(completed_seqno, request->seqno))
-                       continue;
-
-               i915_set_reset_status(ring, request, acthd);
-       }
-}
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_engine_cs *ring)
+{
+       struct drm_i915_gem_request *request;
+       u32 completed_seqno;
+
+       completed_seqno = ring->get_seqno(ring, false);
+
+       list_for_each_entry(request, &ring->request_list, list) {
+               if (i915_seqno_passed(completed_seqno, request->seqno))
+                       continue;
+
+               return request;
+       }
+
+       return NULL;
+}
+
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+                                      struct intel_engine_cs *ring)
+{
+       struct drm_i915_gem_request *request;
+       bool ring_hung;
+
+       request = i915_gem_find_active_request(ring);
+
+       if (request == NULL)
+               return;
+
+       ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+
+       i915_set_reset_status(dev_priv, request->ctx, ring_hung);
+
+       list_for_each_entry_continue(request, &ring->request_list, list)
+               i915_set_reset_status(dev_priv, request->ctx, false);
+}
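i915_gem_find_active_request() exploits in-order retirement: the first request whose seqno has not yet passed is the one the GPU hung on, and everything queued after it is presumed innocent. A toy model of that attribution, using the same wraparound-safe comparison:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool seqno_passed(uint32_t completed, uint32_t seqno)
{
        return (int32_t)(completed - seqno) >= 0;
}

int main(void)
{
        uint32_t requests[] = { 10, 11, 12, 13 }; /* submission order */
        uint32_t completed = 11;                  /* last seqno the GPU finished */
        unsigned int i;

        for (i = 0; i < 4; i++) {
                if (seqno_passed(completed, requests[i]))
                        continue;                 /* already retired */
                printf("hung on request %u; %u later request(s) innocent\n",
                       requests[i], 3 - i);
                break;
        }
        return 0;
}
```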
@@ -2139 +2111 @@
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                                         struct intel_engine_cs *ring)
@@ -2163 +2135 @@
                                           struct drm_i915_gem_request,
                                           list);
 
                i915_gem_free_request(request);
        }
+
+       /* These may not have been flush before the reset, do so now */
+       kfree(ring->preallocated_lazy_request);
+       ring->preallocated_lazy_request = NULL;
+       ring->outstanding_lazy_seqno = 0;
 }
 
 void i915_gem_restore_fences(struct drm_device *dev)
@@ -2189 +2166 @@
 }
 
 void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
+       struct intel_engine_cs *ring;
        int i;
 
        /*
@@ -2203 +2180 @@
                i915_gem_reset_ring_status(dev_priv, ring);
 
        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_cleanup(dev_priv, ring);
 
-       i915_gem_cleanup_ringbuffer(dev);
+       i915_gem_context_reset(dev);
 
        i915_gem_restore_fences(dev);
 }
 
 /**
 * This function clears the request list as sequence numbers are passed.
 */
 void
-i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
        uint32_t seqno;
 
        if (list_empty(&ring->request_list))
                return;
 
        WARN_ON(i915_verify_lists(ring->dev));
 
        seqno = ring->get_seqno(ring, true);
 
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate,
+        * before we free the context associated with the requests.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj;
+
+               obj = list_first_entry(&ring->active_list,
+                                      struct drm_i915_gem_object,
+                                      ring_list);
+
+               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+                       break;
+
+               i915_gem_object_move_to_inactive(obj);
+       }
@@ -2239 +2234 @@
                /* We know the GPU must have read the request to have
                 * sent us the seqno + interrupt, so use the position
                 * of tail of the request to update the last known position
                 * of the GPU head.
                 */
-               ring->last_retired_head = request->tail;
+               ring->buffer->last_retired_head = request->tail;
 
                i915_gem_free_request(request);
        }
 
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate.
-        */
-       while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&ring->active_list,
-                                      struct drm_i915_gem_object,
-                                      ring_list);
-
-               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
-                       break;
-
-               i915_gem_object_move_to_inactive(obj);
-       }
-
        if (unlikely(ring->trace_irq_seqno &&
                     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
                ring->irq_put(ring);
2272 | } |
2251 | } |
Line 2273... | Line 2252... | ||
2273 | 2252 | ||
2274 | bool |
2253 | bool |
2275 | i915_gem_retire_requests(struct drm_device *dev) |
2254 | i915_gem_retire_requests(struct drm_device *dev) |
2276 | { |
2255 | { |
2277 | drm_i915_private_t *dev_priv = dev->dev_private; |
2256 | struct drm_i915_private *dev_priv = dev->dev_private; |
2278 | struct intel_ring_buffer *ring; |
2257 | struct intel_engine_cs *ring; |
2279 | bool idle = true; |
2258 | bool idle = true; |
Line 2280... | Line 2259... | ||
2280 | int i; |
2259 | int i; |
2281 | 2260 | ||
Line 2364... | Line 2343... | ||
2364 | * ioctl |
2343 | * ioctl |
2365 | */ |
2344 | */ |
2366 | int |
2345 | int |
2367 | i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) |
2346 | i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) |
2368 | { |
2347 | { |
2369 | drm_i915_private_t *dev_priv = dev->dev_private; |
2348 | struct drm_i915_private *dev_priv = dev->dev_private; |
2370 | struct drm_i915_gem_wait *args = data; |
2349 | struct drm_i915_gem_wait *args = data; |
2371 | struct drm_i915_gem_object *obj; |
2350 | struct drm_i915_gem_object *obj; |
2372 | struct intel_ring_buffer *ring = NULL; |
2351 | struct intel_engine_cs *ring = NULL; |
2373 | struct timespec timeout_stack, *timeout = NULL; |
- | |
2374 | unsigned reset_counter; |
2352 | unsigned reset_counter; |
2375 | u32 seqno = 0; |
2353 | u32 seqno = 0; |
2376 | int ret = 0; |
2354 | int ret = 0; |
Line 2377... | Line -... | ||
2377 | - | ||
2378 | if (args->timeout_ns >= 0) { |
- | |
2379 | timeout_stack = ns_to_timespec(args->timeout_ns); |
- | |
2380 | timeout = &timeout_stack; |
- | |
2381 | } |
- | |
2382 | 2355 | ||
2383 | ret = i915_mutex_lock_interruptible(dev); |
2356 | ret = i915_mutex_lock_interruptible(dev); |
2384 | if (ret) |
2357 | if (ret) |
Line 2385... | Line 2358... | ||
2385 | return ret; |
2358 | return ret; |
Line 2402... | Line 2375... | ||
2402 | 2375 | ||
2403 | if (seqno == 0) |
2376 | if (seqno == 0) |
Line 2404... | Line 2377... | ||
2404 | goto out; |
2377 | goto out; |
2405 | 2378 | ||
2406 | /* Do this after OLR check to make sure we make forward progress polling |
2379 | /* Do this after OLR check to make sure we make forward progress polling |
2407 | * on this IOCTL with a 0 timeout (like busy ioctl) |
2380 | * on this IOCTL with a timeout <=0 (like busy ioctl) |
2408 | */ |
2381 | */ |
2409 | if (!args->timeout_ns) { |
2382 | if (args->timeout_ns <= 0) { |
2410 | ret = -ETIME; |
2383 | ret = -ETIME; |
Line 2411... | Line 2384... | ||
2411 | goto out; |
2384 | goto out; |
2412 | } |
2385 | } |
2413 | 2386 | ||
Line 2414... | Line 2387... | ||
2414 | drm_gem_object_unreference(&obj->base); |
2387 | drm_gem_object_unreference(&obj->base); |
2415 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
- | |
2416 | mutex_unlock(&dev->struct_mutex); |
2388 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
2417 | - | ||
Line 2418... | Line 2389... | ||
2418 | ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv); |
2389 | mutex_unlock(&dev->struct_mutex); |
2419 | if (timeout) |
2390 | |
2420 | args->timeout_ns = timespec_to_ns(timeout); |
2391 | return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns, |
2421 | return ret; |
2392 | file->driver_priv); |
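The reworked wait ioctl treats a timeout that is already <= 0 as a poll: if the request is not done, it fails with -ETIME at once, and otherwise __wait_seqno() can write the remaining time back through &args->timeout_ns. A toy rendition of that policy, illustrative only:

```c
#include <stdio.h>

#define ETIME 62 /* "Timer expired" */

/* A timeout that is already <= 0 means "don't block": report -ETIME
 * immediately if the work is not finished. */
static int wait_for_done(int done, long long *timeout_ns)
{
        if (done)
                return 0;

        if (*timeout_ns <= 0)
                return -ETIME;

        /* a real implementation would sleep here and write the
         * remaining time back through timeout_ns */
        *timeout_ns = 0;
        return 0;
}

int main(void)
{
        long long t = 0;
        printf("%d\n", wait_for_done(0, &t)); /* -62 */
        return 0;
}
```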
@@ -2438 +2409 @@
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                    struct intel_ring_buffer *to)
+                    struct intel_engine_cs *to)
 {
-       struct intel_ring_buffer *from = obj->ring;
+       struct intel_engine_cs *from = obj->ring;
        u32 seqno;
        int ret, idx;
 
        if (from == NULL || to == from)
@@ -2453 +2424 @@
                return i915_gem_object_wait_rendering(obj, false);
 
        idx = intel_ring_sync_index(from, to);
 
        seqno = obj->last_read_seqno;
-       if (seqno <= from->sync_seqno[idx])
+       /* Optimization: Avoid semaphore sync when we are sure we already
+        * waited for an object with higher seqno */
+       if (seqno <= from->semaphore.sync_seqno[idx])
                return 0;
 
        ret = i915_gem_check_olr(obj->ring, seqno);
        if (ret)
                return ret;
 
        trace_i915_gem_ring_sync_to(from, to, seqno);
-       ret = to->sync_to(to, from, seqno);
+       ret = to->semaphore.sync_to(to, from, seqno);
        if (!ret)
                /* We use last_read_seqno because sync_to()
                 * might have just caused seqno wrap under
                 * the radar.
                 */
2499 | } |
2472 | } |
Line 2500... | Line 2473... | ||
2500 | 2473 | ||
2501 | int i915_vma_unbind(struct i915_vma *vma) |
2474 | int i915_vma_unbind(struct i915_vma *vma) |
2502 | { |
2475 | { |
2503 | struct drm_i915_gem_object *obj = vma->obj; |
2476 | struct drm_i915_gem_object *obj = vma->obj; |
2504 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
2477 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
Line 2505... | Line 2478... | ||
2505 | int ret; |
2478 | int ret; |
2506 | 2479 | ||
Line 2507... | Line 2480... | ||
2507 | if(obj == get_fb_obj()) |
2480 | if(obj == get_fb_obj()) |
2508 | return 0; |
2481 | return 0; |
Line 2509... | Line 2482... | ||
2509 | 2482 | ||
2510 | if (list_empty(&vma->vma_link)) |
2483 | if (list_empty(&vma->vma_link)) |
2511 | return 0; |
- | |
2512 | 2484 | return 0; |
|
2513 | if (!drm_mm_node_allocated(&vma->node)) { |
2485 | |
Line 2514... | Line 2486... | ||
2514 | i915_gem_vma_destroy(vma); |
2486 | if (!drm_mm_node_allocated(&vma->node)) { |
2515 | 2487 | i915_gem_vma_destroy(vma); |
|
Line 2516... | Line 2488... | ||
2516 | return 0; |
2488 | return 0; |
Line 2517... | Line 2489... | ||
2517 | } |
2489 | } |
Line 2527... | Line 2499... | ||
2527 | /* Continue on if we fail due to EIO, the GPU is hung so we |
2499 | /* Continue on if we fail due to EIO, the GPU is hung so we |
2528 | * should be safe and we need to cleanup or else we might |
2500 | * should be safe and we need to cleanup or else we might |
2529 | * cause memory corruption through use-after-free. |
2501 | * cause memory corruption through use-after-free. |
2530 | */ |
2502 | */ |
Line -... | Line 2503... | ||
- | 2503 | ||
2531 | 2504 | if (i915_is_ggtt(vma->vm)) { |
|
Line 2532... | Line 2505... | ||
2532 | i915_gem_object_finish_gtt(obj); |
2505 | i915_gem_object_finish_gtt(obj); |
2533 | 2506 | ||
2534 | /* release the fence reg _after_ flushing */ |
2507 | /* release the fence reg _after_ flushing */ |
2535 | ret = i915_gem_object_put_fence(obj); |
2508 | ret = i915_gem_object_put_fence(obj); |
- | 2509 | if (ret) |
|
Line 2536... | Line 2510... | ||
2536 | if (ret) |
2510 | return ret; |
Line 2537... | Line -... | ||
2537 | return ret; |
- | |
2538 | - | ||
2539 | trace_i915_vma_unbind(vma); |
- | |
2540 | - | ||
2541 | if (obj->has_global_gtt_mapping) |
- | |
2542 | i915_gem_gtt_unbind_object(obj); |
- | |
2543 | if (obj->has_aliasing_ppgtt_mapping) { |
2511 | } |
Line 2544... | Line 2512... | ||
2544 | i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); |
2512 | |
2545 | obj->has_aliasing_ppgtt_mapping = 0; |
2513 | trace_i915_vma_unbind(vma); |
2546 | } |
2514 | |
2547 | i915_gem_gtt_finish_object(obj); |
2515 | vma->unbind_vma(vma); |
Line 2548... | Line 2516... | ||
2548 | 2516 | ||
2549 | list_del(&vma->mm_list); |
2517 | list_del_init(&vma->mm_list); |
Line 2550... | Line 2518... | ||
2550 | /* Avoid an unnecessary call to unbind on rebind. */ |
2518 | /* Avoid an unnecessary call to unbind on rebind. */ |
2551 | if (i915_is_ggtt(vma->vm)) |
2519 | if (i915_is_ggtt(vma->vm)) |
2552 | obj->map_and_fenceable = true; |
2520 | obj->map_and_fenceable = true; |
- | 2521 | ||
2553 | 2522 | drm_mm_remove_node(&vma->node); |
|
- | 2523 | i915_gem_vma_destroy(vma); |
|
Line 2554... | Line 2524... | ||
2554 | drm_mm_remove_node(&vma->node); |
2524 | |
2555 | i915_gem_vma_destroy(vma); |
2525 | /* Since the unbound list is global, only move to that list if |
2556 | 2526 | * no more VMAs exist. */ |
|
2557 | /* Since the unbound list is global, only move to that list if |
2527 | if (list_empty(&obj->vma_list)) { |
2558 | * no more VMAs exist. */ |
2528 | i915_gem_gtt_finish_object(obj); |
Line 2559... | Line 2529... | ||
2559 | if (list_empty(&obj->vma_list)) |
2529 | list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); |
2560 | list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); |
2530 | } |
Line 2561... | Line -... | ||
2561 | - | ||
2562 | /* And finally now the object is completely decoupled from this vma, |
- | |
2563 | * we can drop its hold on the backing storage and allow it to be |
- | |
2564 | * reaped by the shrinker. |
- | |
2565 | */ |
- | |
2566 | i915_gem_object_unpin_pages(obj); |
- | |
2567 | - | ||
2568 | return 0; |
- | |
2569 | } |
- | |
2570 | - | ||
2571 | /** |
- | |
2572 | * Unbinds an object from the global GTT aperture. |
- | |
2573 | */ |
- | |
2574 | int |
- | |
2575 | i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) |
- | |
2576 | { |
- | |
2577 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
- | |
2578 | struct i915_address_space *ggtt = &dev_priv->gtt.base; |
- | |
2579 | - | ||
2580 | if (!i915_gem_obj_ggtt_bound(obj)) |
- | |
2581 | return 0; |
2531 | |
2582 | 2532 | /* And finally now the object is completely decoupled from this vma, |
|
2583 | if (obj->pin_count) |
2533 | * we can drop its hold on the backing storage and allow it to be |
2584 | return -EBUSY; |
2534 | * reaped by the shrinker. |
2585 | 2535 | */ |
|
Line 2586... | Line 2536... | ||
2586 | BUG_ON(obj->pages == NULL); |
2536 | i915_gem_object_unpin_pages(obj); |
2587 | 2537 | ||
2588 | return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt)); |
2538 | return 0; |
2589 | } |
2539 | } |
2590 | 2540 | ||
Line 2591... | Line 2541... | ||
2591 | int i915_gpu_idle(struct drm_device *dev) |
2541 | int i915_gpu_idle(struct drm_device *dev) |
2592 | { |
2542 | { |
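The rewrite replaces the open-coded GGTT/aliasing-PPGTT branches with a single vma->unbind_vma() call, i.e. per-VMA function-pointer dispatch chosen by the address space that created the VMA. A minimal sketch of that pattern follows; the struct layout and names are simplified stand-ins, not the driver's real types.

/* Sketch: each VMA carries bind/unbind callbacks, so callers no longer
 * branch on GGTT vs. per-process GTT themselves. */
struct address_space_stub;

struct vma {
	struct address_space_stub *vm;
	void (*bind_vma)(struct vma *vma, int cache_level, unsigned int flags);
	void (*unbind_vma)(struct vma *vma);
};

static void ggtt_unbind(struct vma *vma)  { (void)vma; /* clear global GTT PTEs */ }
static void ppgtt_unbind(struct vma *vma) { (void)vma; /* clear per-process PTEs */ }

/* The address space that creates the VMA picks the implementation once: */
static void init_vma_ops(struct vma *vma, int is_ggtt)
{
	vma->unbind_vma = is_ggtt ? ggtt_unbind : ppgtt_unbind;
}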
@@ fence register writers: old 2609-2713 / new 2559-2663 @@
 }
 
 static void i965_write_fence_reg(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int fence_reg;
 	int fence_pitch_shift;
 
...
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 val;
 
 	if (obj) {
...
 }
 
 static void i830_write_fence_reg(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t val;
 
 	if (obj) {
2713 | if (obj) { |
2663 | if (obj) { |
Line 2830... | Line 2780... | ||
2830 | if (obj->fence_reg == I915_FENCE_REG_NONE) |
2780 | if (obj->fence_reg == I915_FENCE_REG_NONE) |
2831 | return 0; |
2781 | return 0; |
Line 2832... | Line 2782... | ||
2832 | 2782 | ||
Line -... | Line 2783... | ||
- | 2783 | fence = &dev_priv->fence_regs[obj->fence_reg]; |
|
- | 2784 | ||
- | 2785 | if (WARN_ON(fence->pin_count)) |
|
2833 | fence = &dev_priv->fence_regs[obj->fence_reg]; |
2786 | return -EBUSY; |
2834 | 2787 | ||
Line 2835... | Line 2788... | ||
2835 | i915_gem_object_fence_lost(obj); |
2788 | i915_gem_object_fence_lost(obj); |
2836 | i915_gem_object_update_fence(obj, fence, false); |
2789 | i915_gem_object_update_fence(obj, fence, false); |
@@ i915_gem_object_bind_to_vm: old 3008-3123 / new 2961-3076 @@
 }
 
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
-static int
+static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   bool map_and_fenceable,
-			   bool nonblocking)
+			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
-	size_t gtt_max =
-		map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+	unsigned long start =
+		flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+	unsigned long end =
+		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
 
...
 		i915_gem_get_gtt_alignment(dev,
 					   obj->base.size,
 					   obj->tiling_mode, false);
 
 	if (alignment == 0)
-		alignment = map_and_fenceable ? fence_alignment :
+		alignment = flags & PIN_MAPPABLE ? fence_alignment :
 						unfenced_alignment;
-	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
-		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
-		return -EINVAL;
+	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
+		DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
+		return ERR_PTR(-EINVAL);
 	}
 
-	size = map_and_fenceable ? fence_size : obj->base.size;
+	size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
 
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->base.size > gtt_max) {
-		DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
+	if (obj->base.size > end) {
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
 			  obj->base.size,
-			  map_and_fenceable ? "mappable" : "total",
-			  gtt_max);
-		return -E2BIG;
+			  flags & PIN_MAPPABLE ? "mappable" : "total",
+			  end);
+		return ERR_PTR(-E2BIG);
 	}
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
 	i915_gem_object_pin_pages(obj);
 
-	BUG_ON(!i915_is_ggtt(vm));
-
 	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
-	if (IS_ERR(vma)) {
-		ret = PTR_ERR(vma);
+	if (IS_ERR(vma))
 		goto err_unpin;
-	}
-
-	/* For now we only ever use 1 vma per object */
-	WARN_ON(!list_is_singular(&obj->vma_list));
 
 search_free:
 	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 						  size, alignment,
 						  obj->cache_level,
...
 				    dev_priv->gtt.mappable_end);
 
 		obj->map_and_fenceable = mappable && fenceable;
 	}
 
-	WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
+	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
-	trace_i915_vma_bind(vma, map_and_fenceable);
+	trace_i915_vma_bind(vma, flags);
+	vma->bind_vma(vma, obj->cache_level,
+		      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+
 	i915_gem_verify_gtt(dev);
-	return 0;
+	return vma;
 
 err_remove_node:
 	drm_mm_remove_node(&vma->node);
 err_free_vma:
 	i915_gem_vma_destroy(vma);
+	vma = ERR_PTR(ret);
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-	return ret;
+	return vma;
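The new return type smuggles errno codes through the pointer itself: ERR_PTR() stores a small negative errno in the last page of the address space, where no valid pointer can live, and IS_ERR()/PTR_ERR() recover it. The sketch below mirrors those kernel helpers in spirit only; it is a simplified userspace illustration, not the kernel's definitions.

/* Sketch of the pointer-error encoding used by the new bind_to_vm(). */
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;	/* negative errno lands in the top page */
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *vma = ERR_PTR(-22);	/* e.g. -EINVAL */

	if (IS_ERR(vma))
		printf("bind failed: %ld\n", PTR_ERR(vma));
	return 0;
}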
@@ i915_gem_object_set_to_gtt_domain: old 3213-3234 / new 3166-3188 @@
  * flushes to occur.
  */
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
-	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
...
 	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
+	i915_gem_object_retire(obj);
 	i915_gem_object_flush_cpu_write_domain(obj, false);
 
@@ i915_gem_object_set_cache_level: old 3271-3337 / new 3225-3288 @@
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct i915_vma *vma;
+	struct i915_vma *vma, *next;
 	int ret;
 
 	if (obj->cache_level == cache_level)
 		return 0;
 
-	if (obj->pin_count) {
+	if (i915_gem_obj_is_pinned(obj)) {
 		DRM_DEBUG("can not change the cache level of pinned objects\n");
 		return -EBUSY;
 	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
 			ret = i915_vma_unbind(vma);
 			if (ret)
 				return ret;
 
 			break;
...
 		ret = i915_gem_object_put_fence(obj);
 		if (ret)
 			return ret;
 	}
 
-	if (obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(obj, cache_level);
-	if (obj->has_aliasing_ppgtt_mapping)
-		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-				       obj, cache_level);
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (drm_mm_node_allocated(&vma->node))
+			vma->bind_vma(vma, cache_level,
+				      obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
 	}
 
 	list_for_each_entry(vma, &obj->vma_list, vma_link)
...
 	 * actually been tracking whether the data is in the
 	 * CPU cache or not, since we only allow one bit set
 	 * in obj->write_domain and have been skipping the clflushes.
 	 * Just set it to the CPU cache for now.
 	 */
+	i915_gem_object_retire(obj);
 	WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
 
 	old_read_domains = obj->base.read_domains;
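The switch to list_for_each_entry_safe() above matters because i915_vma_unbind() can destroy the VMA the loop is standing on; the _safe variant caches the next element before the body runs. Here is a minimal stand-alone illustration of the same pattern with a plain doubly-linked list; the names are generic, not the driver's.

/* Sketch: deleting nodes while iterating requires fetching 'next' first. */
#include <stdlib.h>

struct node { struct node *prev, *next; int doomed; };

static void delete_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	free(n);
}

static void prune(struct node *head)	/* 'head' is a sentinel node */
{
	struct node *n, *next;

	/* expanded form of the _safe pattern: grab 'next' before the body */
	for (n = head->next, next = n->next; n != head;
	     n = next, next = n->next)
		if (n->doomed)
			delete_node(n);	/* safe: 'next' was already saved */
}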
@@ is_pin_display: old 3430-3444 / new 3381-3404 @@
 
 static bool is_pin_display(struct drm_i915_gem_object *obj)
 {
+	struct i915_vma *vma;
+
+	if (list_empty(&obj->vma_list))
+		return false;
+
+	vma = i915_gem_obj_to_ggtt(obj);
+	if (!vma)
+		return false;
+
 	/* There are 3 sources that pin objects:
 	 *   1. The display engine (scanouts, sprites, cursors);
 	 *   2. Reservations for execbuffer;
 	 *   3. The user.
...
 	 * are only called outside of the reservation path. The user
 	 * can only increment pin_count once, and so if after
 	 * subtracting the potential reference by the user, any pin_count
 	 * remains, it must be due to another use by the display engine.
 	 */
-	return obj->pin_count - !!obj->user_pin_count;
+	return vma->pin_count - !!obj->user_pin_count;
 }
@@ i915_gem_object_pin_to_display_plane: old 3445-3518 / new 3405-3481 @@
 
 /*
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
  * any flushes to be pipelined (for pageflips).
  */
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_ring_buffer *pipelined)
+				     struct intel_engine_cs *pipelined)
 {
 	u32 old_read_domains, old_write_domain;
+	bool was_pin_display;
 	int ret;
 
 	if (pipelined != obj->ring) {
...
 	}
 
 	/* Mark the pin_display early so that we account for the
 	 * display coherency whilst setting up the cache domains.
 	 */
+	was_pin_display = obj->pin_display;
 	obj->pin_display = true;
 
 	/* The display engine is not coherent with the LLC cache on gen6. As
...
 	/* As the user may map the buffer once pinned in the display plane
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
 	if (ret)
 		goto err_unpin_display;
...
 					    old_write_domain);
 
 	return 0;
 
 err_unpin_display:
-	obj->pin_display = is_pin_display(obj);
+	WARN_ON(was_pin_display != is_pin_display(obj));
+	obj->pin_display = was_pin_display;
 	return ret;
 }
 
 void
 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
 {
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 	obj->pin_display = is_pin_display(obj);
@@ i915_gem_object_set_to_cpu_domain: old 3552-3557 / new 3515-3521 @@
 
 	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
+	i915_gem_object_retire(obj);
 	i915_gem_object_flush_gtt_write_domain(obj);
@@ i915_gem_ring_throttle: old 3599-3637 / new 3563-3601 @@
 static int
 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
-	unsigned long recent_enough = GetTimerTicks() - msecs_to_jiffies(20);
+	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
 	struct drm_i915_gem_request *request;
-	struct intel_ring_buffer *ring = NULL;
+	struct intel_engine_cs *ring = NULL;
 	unsigned reset_counter;
 	u32 seqno = 0;
 	int ret;
 
...
 	queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
 	return ret;
 }
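The throttle only looks at requests emitted inside the last 20 ms; recent_enough is simply "now minus the window" in jiffies. A sketch of that arithmetic follows, using time_after()-style signed subtraction so the comparison stays correct across jiffies wraparound; the HZ value and helper names here are illustrative assumptions.

/* Sketch of the 20 ms throttle window computed above, assuming HZ=1000. */
#include <stdbool.h>

#define HZ 1000
#define msecs_to_jiffies(ms) ((ms) * HZ / 1000)

static bool time_after_eq(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;	/* wraparound-safe comparison */
}

/* Keep a request only if it was emitted inside the last 20 ms. */
static bool recent_enough(unsigned long now, unsigned long emitted)
{
	return time_after_eq(emitted, now - msecs_to_jiffies(20));
}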
@@ i915_vma_misplaced: added in the new revision (new 3604-3621) @@
+static bool
+i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (alignment &&
+	    vma->node.start & (alignment - 1))
+		return true;
+
+	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
+		return true;
+
+	if (flags & PIN_OFFSET_BIAS &&
+	    vma->node.start < (flags & PIN_OFFSET_MASK))
+		return true;
+
+	return false;
+}
 
3639 | 3622 | ||
3640 | int |
3623 | int |
3641 | i915_gem_object_pin(struct drm_i915_gem_object *obj, |
3624 | i915_gem_object_pin(struct drm_i915_gem_object *obj, |
3642 | struct i915_address_space *vm, |
- | |
3643 | uint32_t alignment, |
3625 | struct i915_address_space *vm, |
3644 | bool map_and_fenceable, |
3626 | uint32_t alignment, |
- | 3627 | uint64_t flags) |
|
3645 | bool nonblocking) |
3628 | { |
3646 | { |
3629 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
Line 3647... | Line 3630... | ||
3647 | struct i915_vma *vma; |
3630 | struct i915_vma *vma; |
3648 | int ret; |
3631 | int ret; |
Line 3649... | Line 3632... | ||
3649 | 3632 | ||
- | 3633 | if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base)) |
|
Line 3650... | Line 3634... | ||
3650 | if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) |
3634 | return -ENODEV; |
3651 | return -EBUSY; |
- | |
3652 | 3635 | ||
3653 | WARN_ON(map_and_fenceable && !i915_is_ggtt(vm)); |
3636 | if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm))) |
3654 | 3637 | return -EINVAL; |
|
- | 3638 | ||
3655 | vma = i915_gem_obj_to_vma(obj, vm); |
3639 | vma = i915_gem_obj_to_vma(obj, vm); |
3656 | 3640 | if (vma) { |
|
3657 | if (vma) { |
3641 | if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) |
3658 | if ((alignment && |
3642 | return -EBUSY; |
3659 | vma->node.start & (alignment - 1)) || |
3643 | |
3660 | (map_and_fenceable && !obj->map_and_fenceable)) { |
3644 | if (i915_vma_misplaced(vma, alignment, flags)) { |
3661 | WARN(obj->pin_count, |
3645 | WARN(vma->pin_count, |
3662 | "bo is already pinned with incorrect alignment:" |
3646 | "bo is already pinned with incorrect alignment:" |
3663 | " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," |
3647 | " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," |
3664 | " obj->map_and_fenceable=%d\n", |
3648 | " obj->map_and_fenceable=%d\n", |
3665 | i915_gem_obj_offset(obj, vm), alignment, |
3649 | i915_gem_obj_offset(obj, vm), alignment, |
- | 3650 | !!(flags & PIN_MAPPABLE), |
|
- | 3651 | obj->map_and_fenceable); |
|
3666 | map_and_fenceable, |
3652 | ret = i915_vma_unbind(vma); |
3667 | obj->map_and_fenceable); |
3653 | if (ret) |
Line 3668... | Line 3654... | ||
3668 | ret = i915_vma_unbind(vma); |
3654 | return ret; |
3669 | if (ret) |
- | |
3670 | return ret; |
- | |
3671 | } |
3655 | |
3672 | } |
- | |
3673 | - | ||
3674 | if (!i915_gem_obj_bound(obj, vm)) { |
3656 | vma = NULL; |
3675 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
3657 | } |
3676 | - | ||
3677 | ret = i915_gem_object_bind_to_vm(obj, vm, alignment, |
- | |
3678 | map_and_fenceable, |
- | |
3679 | nonblocking); |
3658 | } |
Line 3680... | Line 3659... | ||
3680 | if (ret) |
3659 | |
3681 | return ret; |
3660 | if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { |
Line 3682... | Line 3661... | ||
3682 | 3661 | vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags); |
|
- | 3662 | if (IS_ERR(vma)) |
|
3683 | if (!dev_priv->mm.aliasing_ppgtt) |
3663 | return PTR_ERR(vma); |
Line 3684... | Line 3664... | ||
3684 | i915_gem_gtt_bind_object(obj, obj->cache_level); |
3664 | } |
3685 | } |
3665 | |
Line 3686... | Line 3666... | ||
3686 | 3666 | if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping) |
|
3687 | if (!obj->has_global_gtt_mapping && map_and_fenceable) |
3667 | vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); |
3688 | i915_gem_gtt_bind_object(obj, obj->cache_level); |
3668 | |
- | 3669 | vma->pin_count++; |
|
- | 3670 | if (flags & PIN_MAPPABLE) |
|
- | 3671 | obj->pin_mappable |= true; |
|
3689 | 3672 | ||
3690 | obj->pin_count++; |
3673 | return 0; |
Line 3691... | Line 3674... | ||
3691 | obj->pin_mappable |= map_and_fenceable; |
3674 | } |
3692 | 3675 | ||
3693 | return 0; |
3676 | void |
Line -... | Line 3677... | ||
- | 3677 | i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) |
|
- | 3678 | { |
|
- | 3679 | struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); |
|
- | 3680 | ||
- | 3681 | BUG_ON(!vma); |
|
- | 3682 | BUG_ON(vma->pin_count == 0); |
|
- | 3683 | BUG_ON(!i915_gem_obj_ggtt_bound(obj)); |
|
- | 3684 | ||
- | 3685 | if (--vma->pin_count == 0) |
|
- | 3686 | obj->pin_mappable = false; |
|
- | 3687 | } |
|
- | 3688 | ||
- | 3689 | bool |
|
- | 3690 | i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) |
|
- | 3691 | { |
|
- | 3692 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
|
- | 3693 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
|
- | 3694 | struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj); |
|
- | 3695 | ||
- | 3696 | WARN_ON(!ggtt_vma || |
|
- | 3697 | dev_priv->fence_regs[obj->fence_reg].pin_count > |
|
- | 3698 | ggtt_vma->pin_count); |
|
- | 3699 | dev_priv->fence_regs[obj->fence_reg].pin_count++; |
|
- | 3700 | return true; |
|
- | 3701 | } else |
|
- | 3702 | return false; |
|
3694 | } |
3703 | } |
3695 | 3704 | ||
3696 | void |
3705 | void |
3697 | i915_gem_object_unpin(struct drm_i915_gem_object *obj) |
3706 | i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) |
3698 | { |
3707 | { |
3699 | BUG_ON(obj->pin_count == 0); |
3708 | if (obj->fence_reg != I915_FENCE_REG_NONE) { |
3700 | BUG_ON(!i915_gem_obj_bound_any(obj)); |
3709 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
Line -... | Line 3710... | ||
- | 3710 | WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); |
|
- | 3711 | dev_priv->fence_regs[obj->fence_reg].pin_count--; |
|
- | 3712 | } |
|
3701 | 3713 | } |
|
3702 | if (--obj->pin_count == 0) |
3714 | |
3703 | obj->pin_mappable = false; |
3715 | int |
Line 3704... | Line 3716... | ||
3704 | } |
3716 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
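Pinning moved from the object to the VMA in this rewrite, but the refcount discipline is unchanged: pins nest, the GTT address is stable while pin_count is nonzero, and every pin must be paired with an unpin. A minimal sketch of that contract, with simplified types that are not the driver's own:

/* Sketch: per-VMA pin refcounting with underflow/overflow assertions. */
#include <assert.h>

struct vma_stub {
	unsigned int pin_count;
	unsigned long gtt_offset;
};

static unsigned long vma_pin(struct vma_stub *vma)
{
	assert(vma->pin_count < 15);	/* e.g. a small saturating limit */
	vma->pin_count++;
	return vma->gtt_offset;		/* safe to use while pinned */
}

static void vma_unpin(struct vma_stub *vma)
{
	assert(vma->pin_count > 0);	/* catch an unbalanced unpin */
	vma->pin_count--;
}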
@@ i915_gem_pin_ioctl / i915_gem_unpin_ioctl: old 3720-3789 / new 3732-3801 @@
 		ret = -ENOENT;
 		goto unlock;
 	}
 
 	if (obj->madv != I915_MADV_WILLNEED) {
-		DRM_ERROR("Attempting to pin a purgeable buffer\n");
-		ret = -EINVAL;
+		DRM_DEBUG("Attempting to pin a purgeable buffer\n");
+		ret = -EFAULT;
 		goto out;
 	}
 
 	if (obj->pin_filp != NULL && obj->pin_filp != file) {
-		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
+		DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
 		ret = -EINVAL;
 		goto out;
 	}
...
 		ret = -EBUSY;
 		goto out;
 	}
 
 	if (obj->user_pin_count == 0) {
-		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
 		if (ret)
 			goto out;
 	}
...
 		ret = -ENOENT;
 		goto unlock;
 	}
 
 	if (obj->pin_filp != file) {
-		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
+		DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
 		ret = -EINVAL;
 		goto out;
 	}
 	obj->user_pin_count--;
 	if (obj->user_pin_count == 0) {
 		obj->pin_filp = NULL;
-		i915_gem_object_unpin(obj);
+		i915_gem_object_ggtt_unpin(obj);
 	}
 
 out:
3789 | out: |
3801 | out: |
Line 3863... | Line 3875... | ||
3863 | if (&obj->base == NULL) { |
3875 | if (&obj->base == NULL) { |
3864 | ret = -ENOENT; |
3876 | ret = -ENOENT; |
3865 | goto unlock; |
3877 | goto unlock; |
3866 | } |
3878 | } |
Line 3867... | Line 3879... | ||
3867 | 3879 | ||
3868 | if (obj->pin_count) { |
3880 | if (i915_gem_obj_is_pinned(obj)) { |
3869 | ret = -EINVAL; |
3881 | ret = -EINVAL; |
3870 | goto out; |
3882 | goto out; |
Line 3871... | Line 3883... | ||
3871 | } |
3883 | } |
@@ i915_gem_free_object: old 3956-3997 / new 3968-4008 @@
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_vma *vma, *next;
 
 	intel_runtime_pm_get(dev_priv);
 
 	trace_i915_gem_object_destroy(obj);
 
-	obj->pin_count = 0;
-	/* NB: 0 or 1 elements */
-	WARN_ON(!list_empty(&obj->vma_list) &&
-		!list_is_singular(&obj->vma_list));
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-		int ret = i915_vma_unbind(vma);
+		int ret;
+
+		vma->pin_count = 0;
+		ret = i915_vma_unbind(vma);
...
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
 		i915_gem_object_unpin_pages(obj);
 
+	WARN_ON(obj->frontbuffer_bits);
+
 	if (WARN_ON(obj->pages_pin_count))
 		obj->pages_pin_count = 0;
 	i915_gem_object_put_pages(obj);
-//	i915_gem_object_free_mmap_offset(obj);
 	i915_gem_object_release_stolen(obj);
+//	i915_gem_object_free_mmap_offset(obj);
 
3997 | 4008 | ||
Line 4022... | Line 4033... | ||
4022 | return vma; |
4033 | return vma; |
Line 4023... | Line 4034... | ||
4023 | 4034 | ||
4024 | return NULL; |
4035 | return NULL; |
Line 4025... | Line -... | ||
4025 | } |
- | |
4026 | - | ||
4027 | static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, |
- | |
4028 | struct i915_address_space *vm) |
- | |
4029 | { |
- | |
4030 | struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); |
- | |
4031 | if (vma == NULL) |
- | |
4032 | return ERR_PTR(-ENOMEM); |
- | |
4033 | - | ||
4034 | INIT_LIST_HEAD(&vma->vma_link); |
- | |
4035 | INIT_LIST_HEAD(&vma->mm_list); |
- | |
4036 | INIT_LIST_HEAD(&vma->exec_list); |
- | |
4037 | vma->vm = vm; |
- | |
4038 | vma->obj = obj; |
- | |
4039 | - | ||
4040 | /* Keep GGTT vmas first to make debug easier */ |
- | |
4041 | if (i915_is_ggtt(vm)) |
- | |
4042 | list_add(&vma->vma_link, &obj->vma_list); |
- | |
4043 | else |
- | |
4044 | list_add_tail(&vma->vma_link, &obj->vma_list); |
- | |
4045 | - | ||
4046 | return vma; |
- | |
4047 | } |
- | |
4048 | - | ||
4049 | struct i915_vma * |
- | |
4050 | i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, |
- | |
4051 | struct i915_address_space *vm) |
- | |
4052 | { |
- | |
4053 | struct i915_vma *vma; |
- | |
4054 | - | ||
4055 | vma = i915_gem_obj_to_vma(obj, vm); |
- | |
4056 | if (!vma) |
- | |
4057 | vma = __i915_gem_vma_create(obj, vm); |
- | |
4058 | - | ||
4059 | return vma; |
- | |
4060 | } |
4036 | } |
4061 | 4037 | ||
4062 | void i915_gem_vma_destroy(struct i915_vma *vma) |
4038 | void i915_gem_vma_destroy(struct i915_vma *vma) |
Line 4063... | Line 4039... | ||
4063 | { |
4039 | { |
@@ i915_gem_suspend / i915_gem_l3_remap / i915_gem_init_swizzling: old 4074-4154 / new 4050-4130 @@
 
 #if 0
 int
 i915_gem_suspend(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
...
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
 	i915_kernel_lost_context(dev);
-	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_stop_ringbuffers(dev);
 
 	/* Hack! Don't let anybody do execbuf while we don't control the chip.
 	 * We need to replace this with a semaphore, or something.
...
 					     DRIVER_MODESET);
 	mutex_unlock(&dev->struct_mutex);
 
 	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+	flush_delayed_work(&dev_priv->mm.idle_work);
 
 	return 0;
 
 err:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 #endif
 
-int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
 {
 	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
...
 	return ret;
 }
 
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (INTEL_INFO(dev)->gen < 5 ||
4213 | ret = intel_init_vebox_ring_buffer(dev); |
4189 | ret = intel_init_vebox_ring_buffer(dev); |
4214 | if (ret) |
4190 | if (ret) |
4215 | goto cleanup_blt_ring; |
4191 | goto cleanup_blt_ring; |
4216 | } |
4192 | } |
Line -... | Line 4193... | ||
- | 4193 | ||
- | 4194 | if (HAS_BSD2(dev)) { |
|
- | 4195 | ret = intel_init_bsd2_ring_buffer(dev); |
|
- | 4196 | if (ret) |
|
- | 4197 | goto cleanup_vebox_ring; |
|
Line 4217... | Line 4198... | ||
4217 | 4198 | } |
|
4218 | 4199 | ||
4219 | ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); |
4200 | ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); |
Line 4220... | Line 4201... | ||
4220 | if (ret) |
4201 | if (ret) |
Line -... | Line 4202... | ||
- | 4202 | goto cleanup_bsd2_ring; |
|
- | 4203 | ||
4221 | goto cleanup_vebox_ring; |
4204 | return 0; |
4222 | 4205 | ||
4223 | return 0; |
4206 | cleanup_bsd2_ring: |
4224 | 4207 | intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]); |
|
4225 | cleanup_vebox_ring: |
4208 | cleanup_vebox_ring: |
Line 4235... | Line 4218... | ||
4235 | } |
4218 | } |
Line 4236... | Line 4219... | ||
4236 | 4219 | ||
4237 | int |
4220 | int |
4238 | i915_gem_init_hw(struct drm_device *dev) |
4221 | i915_gem_init_hw(struct drm_device *dev) |
4239 | { |
4222 | { |
4240 | drm_i915_private_t *dev_priv = dev->dev_private; |
4223 | struct drm_i915_private *dev_priv = dev->dev_private; |
Line 4241... | Line 4224... | ||
4241 | int ret, i; |
4224 | int ret, i; |
4242 | 4225 | ||
Line 4249... | Line 4232... | ||
4249 | if (IS_HASWELL(dev)) |
4232 | if (IS_HASWELL(dev)) |
4250 | I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ? |
4233 | I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ? |
4251 | LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); |
4234 | LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); |
Line 4252... | Line 4235... | ||
4252 | 4235 | ||
- | 4236 | if (HAS_PCH_NOP(dev)) { |
|
4253 | if (HAS_PCH_NOP(dev)) { |
4237 | if (IS_IVYBRIDGE(dev)) { |
4254 | u32 temp = I915_READ(GEN7_MSG_CTL); |
4238 | u32 temp = I915_READ(GEN7_MSG_CTL); |
4255 | temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); |
4239 | temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); |
- | 4240 | I915_WRITE(GEN7_MSG_CTL, temp); |
|
- | 4241 | } else if (INTEL_INFO(dev)->gen >= 7) { |
|
- | 4242 | u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); |
|
- | 4243 | temp &= ~RESET_PCH_HANDSHAKE_ENABLE; |
|
- | 4244 | I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); |
|
4256 | I915_WRITE(GEN7_MSG_CTL, temp); |
4245 | } |
Line 4257... | Line 4246... | ||
4257 | } |
4246 | } |
Line 4258... | Line 4247... | ||
4258 | 4247 | ||
Line 4264... | Line 4253... | ||
4264 | 4253 | ||
4265 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
4254 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
Line 4266... | Line 4255... | ||
4266 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); |
4255 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); |
4267 | 4256 | ||
- | 4257 | /* |
|
- | 4258 | * XXX: Contexts should only be initialized once. Doing a switch to the |
|
- | 4259 | * default context switch however is something we'd like to do after |
|
4268 | /* |
4260 | * reset or thaw (the latter may not actually be necessary for HW, but |
4269 | * XXX: There was some w/a described somewhere suggesting loading |
4261 | * goes with our code better). Context switching requires rings (for |
4270 | * contexts before PPGTT. |
4262 | * the do_switch), but before enabling PPGTT. So don't move this. |
4271 | */ |
4263 | */ |
- | 4264 | ret = i915_gem_context_enable(dev_priv); |
|
4272 | ret = i915_gem_context_init(dev); |
4265 | if (ret && ret != -EIO) { |
4273 | if (ret) { |
- | |
4274 | i915_gem_cleanup_ringbuffer(dev); |
- | |
4275 | DRM_ERROR("Context initialization failed %d\n", ret); |
4266 | DRM_ERROR("Context enable failed %d\n", ret); |
Line 4276... | Line -... | ||
4276 | return ret; |
- | |
4277 | } |
- | |
4278 | - | ||
4279 | if (dev_priv->mm.aliasing_ppgtt) { |
- | |
4280 | ret = dev_priv->mm.aliasing_ppgtt->enable(dev); |
- | |
4281 | if (ret) { |
- | |
4282 | i915_gem_cleanup_aliasing_ppgtt(dev); |
- | |
4283 | DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n"); |
- | |
4284 | } |
4267 | i915_gem_cleanup_ringbuffer(dev); |
4285 | } |
4268 | } |
Line 4286... | Line 4269... | ||
4286 | 4269 | ||
4287 | return 0; |
4270 | return ret; |
4288 | } |
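Adding the second BSD ring extends the function's goto-unwind ladder: each new init step gains a matching cleanup label, so a failure at step N tears down steps N-1..1 in reverse order and nothing else. A compact generic sketch of that C idiom, with stub step functions standing in for the ring initializers:

/* Sketch: the error-unwind ladder pattern used by i915_gem_init_rings(). */
static int init_a(void) { return 0; }
static void fini_a(void) { }
static int init_b(void) { return 0; }
static void fini_b(void) { }
static int init_c(void) { return 0; }	/* the newly added step */

static int init_all(void)
{
	int ret;

	if ((ret = init_a()) != 0)
		goto err;
	if ((ret = init_b()) != 0)
		goto cleanup_a;
	if ((ret = init_c()) != 0)
		goto cleanup_b;

	return 0;

cleanup_b:	/* new label, added together with init_c() */
	fini_b();
cleanup_a:
	fini_a();
err:
	return ret;
}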
4271 | } |
Line 4294... | Line 4277... | ||
4294 | 4277 | ||
Line 4295... | Line 4278... | ||
4295 | mutex_lock(&dev->struct_mutex); |
4278 | mutex_lock(&dev->struct_mutex); |
4296 | 4279 | ||
4297 | if (IS_VALLEYVIEW(dev)) { |
4280 | if (IS_VALLEYVIEW(dev)) { |
4298 | /* VLVA0 (potential hack), BIOS isn't actually waking us */ |
4281 | /* VLVA0 (potential hack), BIOS isn't actually waking us */ |
- | 4282 | I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ); |
|
4299 | I915_WRITE(VLV_GTLC_WAKE_CTRL, 1); |
4283 | if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & |
4300 | if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10)) |
4284 | VLV_GTLC_ALLOWWAKEACK), 10)) |
Line 4301... | Line 4285... | ||
4301 | DRM_DEBUG_DRIVER("allow wake ack timed out\n"); |
4285 | DRM_DEBUG_DRIVER("allow wake ack timed out\n"); |
Line 4302... | Line 4286... | ||
4302 | } |
4286 | } |
4303 | - | ||
4304 | i915_gem_init_global_gtt(dev); |
4287 | |
4305 | 4288 | i915_gem_init_global_gtt(dev); |
|
4306 | ret = i915_gem_init_hw(dev); |
4289 | |
4307 | mutex_unlock(&dev->struct_mutex); |
4290 | ret = i915_gem_context_init(dev); |
Line -... | Line 4291... | ||
- | 4291 | if (ret) { |
|
- | 4292 | mutex_unlock(&dev->struct_mutex); |
|
- | 4293 | return ret; |
|
- | 4294 | } |
|
- | 4295 | ||
- | 4296 | ret = i915_gem_init_hw(dev); |
|
- | 4297 | if (ret == -EIO) { |
|
- | 4298 | /* Allow ring initialisation to fail by marking the GPU as |
|
- | 4299 | * wedged. But we only want to do this where the GPU is angry, |
|
- | 4300 | * for all other failure, such as an allocation failure, bail. |
|
- | 4301 | */ |
|
Line 4308... | Line 4302... | ||
4308 | if (ret) { |
4302 | DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); |
4309 | i915_gem_cleanup_aliasing_ppgtt(dev); |
4303 | atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); |
Line 4310... | Line 4304... | ||
4310 | return ret; |
4304 | ret = 0; |
4311 | } |
4305 | } |
4312 | 4306 | mutex_unlock(&dev->struct_mutex); |
|
4313 | 4307 | ||
4314 | return 0; |
4308 | return ret; |
4315 | } |
4309 | } |
Line 4316... | Line 4310... | ||
4316 | 4310 | ||
4317 | void |
4311 | void |
4318 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) |
4312 | i915_gem_cleanup_ringbuffer(struct drm_device *dev) |
Line 4350... | Line 4344... | ||
4350 | mutex_unlock(&dev->struct_mutex); |
4344 | mutex_unlock(&dev->struct_mutex); |
4351 | return ret; |
4345 | return ret; |
4352 | } |
4346 | } |
Line 4353... | Line 4347... | ||
4353 | 4347 | ||
4354 | BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); |
- | |
Line 4355... | Line 4348... | ||
4355 | mutex_unlock(&dev->struct_mutex); |
4348 | BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); |
4356 | 4349 | ||
4357 | ret = drm_irq_install(dev); |
4350 | ret = drm_irq_install(dev, dev->pdev->irq); |
- | 4351 | if (ret) |
|
Line 4358... | Line 4352... | ||
4358 | if (ret) |
4352 | goto cleanup_ringbuffer; |
Line 4359... | Line 4353... | ||
4359 | goto cleanup_ringbuffer; |
4353 | mutex_unlock(&dev->struct_mutex); |
4360 | - | ||
4361 | return 0; |
4354 | |
4362 | 4355 | return 0; |
|
4363 | cleanup_ringbuffer: |
4356 | |
Line 4364... | Line 4357... | ||
4364 | mutex_lock(&dev->struct_mutex); |
4357 | cleanup_ringbuffer: |
Line 4374... | Line 4367... | ||
4374 | struct drm_file *file_priv) |
4367 | struct drm_file *file_priv) |
4375 | { |
4368 | { |
4376 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4369 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4377 | return 0; |
4370 | return 0; |
Line -... | Line 4371... | ||
- | 4371 | ||
4378 | 4372 | mutex_lock(&dev->struct_mutex); |
|
- | 4373 | drm_irq_uninstall(dev); |
|
Line 4379... | Line 4374... | ||
4379 | drm_irq_uninstall(dev); |
4374 | mutex_unlock(&dev->struct_mutex); |
4380 | 4375 | ||
Line 4381... | Line 4376... | ||
4381 | return i915_gem_suspend(dev); |
4376 | return i915_gem_suspend(dev); |
Line 4394... | Line 4389... | ||
4394 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
4389 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
4395 | } |
4390 | } |
4396 | #endif |
4391 | #endif |
Line 4397... | Line 4392... | ||
4397 | 4392 | ||
4398 | static void |
4393 | static void |
4399 | init_ring_lists(struct intel_ring_buffer *ring) |
4394 | init_ring_lists(struct intel_engine_cs *ring) |
4400 | { |
4395 | { |
4401 | INIT_LIST_HEAD(&ring->active_list); |
4396 | INIT_LIST_HEAD(&ring->active_list); |
4402 | INIT_LIST_HEAD(&ring->request_list); |
4397 | INIT_LIST_HEAD(&ring->request_list); |
Line 4403... | Line 4398... | ||
4403 | } |
4398 | } |
4404 | 4399 | ||
4405 | static void i915_init_vm(struct drm_i915_private *dev_priv, |
4400 | void i915_init_vm(struct drm_i915_private *dev_priv, |
- | 4401 | struct i915_address_space *vm) |
|
- | 4402 | { |
|
4406 | struct i915_address_space *vm) |
4403 | if (!i915_is_ggtt(vm)) |
4407 | { |
4404 | drm_mm_init(&vm->mm, vm->start, vm->total); |
4408 | vm->dev = dev_priv->dev; |
4405 | vm->dev = dev_priv->dev; |
4409 | INIT_LIST_HEAD(&vm->active_list); |
4406 | INIT_LIST_HEAD(&vm->active_list); |
4410 | INIT_LIST_HEAD(&vm->inactive_list); |
4407 | INIT_LIST_HEAD(&vm->inactive_list); |
4411 | INIT_LIST_HEAD(&vm->global_link); |
4408 | INIT_LIST_HEAD(&vm->global_link); |
Line 4412... | Line 4409... | ||
4412 | list_add(&vm->global_link, &dev_priv->vm_list); |
4409 | list_add_tail(&vm->global_link, &dev_priv->vm_list); |
4413 | } |
4410 | } |
4414 | 4411 | ||
4415 | void |
4412 | void |
4416 | i915_gem_load(struct drm_device *dev) |
4413 | i915_gem_load(struct drm_device *dev) |
Line 4417... | Line 4414... | ||
4417 | { |
4414 | { |
4418 | drm_i915_private_t *dev_priv = dev->dev_private; |
4415 | struct drm_i915_private *dev_priv = dev->dev_private; |
Line 4456... | Line 4453... | ||
4456 | 4453 | ||
Line 4457... | Line 4454... | ||
4457 | i915_gem_detect_bit_6_swizzle(dev); |
4454 | i915_gem_detect_bit_6_swizzle(dev); |
Line -... | Line 4455... | ||
- | 4455 | ||
4458 | 4456 | dev_priv->mm.interruptible = true; |
|
Line 4459... | Line -... | ||
4459 | dev_priv->mm.interruptible = true; |
- | |
4460 | - | ||
4461 | } |
- | |
4462 | - | ||
4463 | #if 0 |
- | |
4464 | /* |
4457 | |
4465 | * Create a physically contiguous memory object for this object |
- | |
4466 | * e.g. for cursor + overlay regs |
4458 | mutex_init(&dev_priv->fb_tracking.lock); |
4467 | */ |
- | |
4468 | static int i915_gem_init_phys_object(struct drm_device *dev, |
4459 | } |
4469 | int id, int size, int align) |
4460 | |
Line 4470... | Line -... | ||
4470 | { |
- | |
4471 | drm_i915_private_t *dev_priv = dev->dev_private; |
4461 | int i915_gem_open(struct drm_device *dev, struct drm_file *file) |
Line 4472... | Line 4462... | ||
4472 | struct drm_i915_gem_phys_object *phys_obj; |
4462 | { |
4473 | int ret; |
4463 | struct drm_i915_file_private *file_priv; |
4474 | 4464 | int ret; |
|
Line 4475... | Line -... | ||
4475 | if (dev_priv->mm.phys_objs[id - 1] || !size) |
- | |
4476 | return 0; |
- | |
4477 | - | ||
4478 | phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL); |
- | |
4479 | if (!phys_obj) |
- | |
4480 | return -ENOMEM; |
- | |
4481 | - | ||
4482 | phys_obj->id = id; |
- | |
4483 | - | ||
4484 | phys_obj->handle = drm_pci_alloc(dev, size, align); |
- | |
4485 | if (!phys_obj->handle) { |
- | |
4486 | ret = -ENOMEM; |
- | |
4487 | goto kfree_obj; |
- | |
4488 | } |
- | |
4489 | #ifdef CONFIG_X86 |
- | |
4490 | set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); |
- | |
4491 | #endif |
- | |
4492 | - | ||
4493 | dev_priv->mm.phys_objs[id - 1] = phys_obj; |
- | |
4494 | - | ||
4495 | return 0; |
- | |
4496 | kfree_obj: |
4465 | |
4497 | kfree(phys_obj); |
- | |
4498 | return ret; |
- | |
4499 | } |
- | |
4500 | - | ||
4501 | static void i915_gem_free_phys_object(struct drm_device *dev, int id) |
- | |
4502 | { |
- | |
4503 | drm_i915_private_t *dev_priv = dev->dev_private; |
- | |
4504 | struct drm_i915_gem_phys_object *phys_obj; |
- | |
4505 | - | ||
4506 | if (!dev_priv->mm.phys_objs[id - 1]) |
- | |
4507 | return; |
- | |
4508 | - | ||
4509 | phys_obj = dev_priv->mm.phys_objs[id - 1]; |
- | |
4510 | if (phys_obj->cur_obj) { |
- | |
4511 | i915_gem_detach_phys_object(dev, phys_obj->cur_obj); |
- | |
4512 | } |
- | |
4513 | - | ||
4514 | #ifdef CONFIG_X86 |
- | |
4515 | set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); |
- | |
4516 | #endif |
- | |
4517 | drm_pci_free(dev, phys_obj->handle); |
- | |
4518 | kfree(phys_obj); |
- | |
4519 | dev_priv->mm.phys_objs[id - 1] = NULL; |
- | |
4520 | } |
- | |
4521 | - | ||
4522 | void i915_gem_free_all_phys_object(struct drm_device *dev) |
- | |
4523 | { |
- | |
4524 | int i; |
- | |
4525 | - | ||
4526 | for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) |
- | |
4527 | i915_gem_free_phys_object(dev, i); |
- | |
4528 | } |
- | |
4529 | - | ||
4530 | void i915_gem_detach_phys_object(struct drm_device *dev, |
- | |
4531 | struct drm_i915_gem_object *obj) |
- | |
4532 | { |
- | |
4533 | struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; |
- | |
4534 | char *vaddr; |
- | |
4535 | int i; |
- | |
4536 | int page_count; |
- | |
4537 | - | ||
4538 | if (!obj->phys_obj) |
- | |
4539 | return; |
- | |
4540 | vaddr = obj->phys_obj->handle->vaddr; |
- | |
4541 | - | ||
4542 | page_count = obj->base.size / PAGE_SIZE; |
- | |
4543 | for (i = 0; i < page_count; i++) { |
- | |
4544 | struct page *page = shmem_read_mapping_page(mapping, i); |
- | |
4545 | if (!IS_ERR(page)) { |
- | |
4546 | char *dst = kmap_atomic(page); |
- | |
4547 | memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); |
- | |
4548 | kunmap_atomic(dst); |
- | |
4549 | - | ||
4550 | drm_clflush_pages(&page, 1); |
- | |
4551 | - | ||
4552 | set_page_dirty(page); |
- | |
4553 | mark_page_accessed(page); |
- | |
4554 | page_cache_release(page); |
- | |
4555 | } |
- | |
4556 | } |
- | |
4557 | i915_gem_chipset_flush(dev); |
- | |
4558 | - | ||
4559 | obj->phys_obj->cur_obj = NULL; |
- | |
4560 | obj->phys_obj = NULL; |
- | |
4561 | } |
- | |
4562 | - | ||
Older revision (4560), continued; this block is likewise removed:

int
i915_gem_attach_phys_object(struct drm_device *dev,
			    struct drm_i915_gem_object *obj,
			    int id,
			    int align)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;
	int page_count;
	int i;

	if (id > I915_MAX_PHYS_OBJECT)
		return -EINVAL;

	if (obj->phys_obj) {
		if (obj->phys_obj->id == id)
			return 0;
		i915_gem_detach_phys_object(dev, obj);
	}

	/* create a new object */
	if (!dev_priv->mm.phys_objs[id - 1]) {
		ret = i915_gem_init_phys_object(dev, id,
						obj->base.size, align);
		if (ret) {
			DRM_ERROR("failed to init phys object %d size: %zu\n",
				  id, obj->base.size);
			return ret;
		}
	}

	/* bind to the object */
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
	obj->phys_obj->cur_obj = obj;

	page_count = obj->base.size / PAGE_SIZE;

	for (i = 0; i < page_count; i++) {
		struct page *page;
		char *dst, *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(src);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	return 0;
}

static int
i915_gem_phys_pwrite(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	i915_gem_chipset_flush(dev);
	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	while (!list_empty(&file_priv->mm.request_list)) {
		struct drm_i915_gem_request *request;

		request = list_first_entry(&file_priv->mm.request_list,
		/* ... remainder of this hunk elided by the diff viewer ... */

The newer revision replaces all of the above with the right-hand column below: the tail of the reworked i915_gem_open() (its opening, where file_priv and ret are declared, lies above this hunk), followed by the start of the new i915_gem_track_fb():

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = dev->dev_private;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);
//	INIT_DELAYED_WORK(&file_priv->mm.idle_work,
//			  i915_gem_file_idle_work_handler);

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	if (old) {
		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
		old->frontbuffer_bits &= ~frontbuffer_bits;
		/* ... remainder of this hunk elided by the diff viewer ... */
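The fallback dance in i915_gem_phys_pwrite() above is a standard kernel pattern: attempt the atomic, non-faulting copy under the lock, and only drop the lock for the sleeping copy_from_user() if that fails. A minimal stand-alone sketch (hypothetical helper name, not from this driver; it assumes dst stays valid across the unlock, which is exactly the property the driver comment establishes for the phys object):

static int copy_user_data_locked(struct mutex *lock, void *dst,
				 const char __user *src, size_t len)
{
	unsigned long unwritten;

	/* Fast path: the _inatomic_nocache variant never faults, so it is
	 * safe to call with the mutex held; a non-zero return means some
	 * bytes were left uncopied (e.g. the user pages were not resident).
	 */
	if (!__copy_from_user_inatomic_nocache(dst, src, len))
		return 0;

	/* Slow path: copy_from_user() may fault and therefore sleep, so
	 * drop the lock around it, exactly as the driver code does above.
	 */
	mutex_unlock(lock);
	unwritten = copy_from_user(dst, src, len);
	mutex_lock(lock);

	return unwritten ? -EFAULT : 0;
}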
Line 4685... | Line 4520... (unchanged rows skipped by the viewer)
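The rows skipped at this marker (4661-4684 on the left, 4496-4519 on the right) include the tail of i915_gem_release() shown above. For reference, the loop concludes like this in mainline kernels of the same vintage (a reconstruction, not part of the visible hunk):

		request = list_first_entry(&file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		list_del(&request->client_list);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);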
Both revisions appear here; the hunk resumes mid-signature (in mainline this function is i915_gem_obj_offset()). Older revision (4560):

				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	BUG_ON(list_empty(&o->vma_list));
	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	return 0; //-1;
}

Newer revision; the aliasing-ppgtt check becomes NULL-safe, the BUG_ON is dropped, and a failed lookup now WARNs and returns -1 instead of silently returning 0:

				  struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "%s vma for this object not found.\n",
	     i915_is_ggtt(vm) ? "global" : "ppgtt");
	return -1;
}
Line 4726... | Line 4563... (unchanged rows skipped by the viewer)
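Before the next hunk, a caller-side sketch of why the return-convention change in the i915_gem_obj_offset() hunk above matters: the old code answered a failed lookup with 0, which is indistinguishable from a valid offset at the very start of the aperture. The helper below is hypothetical and for illustration only; the actual register write is elided:

static int program_scanout(struct drm_i915_private *dev_priv,
			   struct drm_i915_gem_object *obj)
{
	/* A miss now comes back as (unsigned long)-1 plus a WARN,
	 * rather than a plausible-looking 0.
	 */
	unsigned long offset = i915_gem_obj_offset(obj, &dev_priv->gtt.base);

	if (offset == (unsigned long)-1)
		return -ENOENT;		/* not bound in the global GTT */

	/* ... write offset into the display surface register ... */
	return 0;
}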
A second function (by its shape, what mainline calls i915_gem_obj_size()) receives the same NULL-safe aliasing-ppgtt check. Older revision (4560):

				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	/* ... five unchanged rows (the vma lookup loop) skipped by the viewer ... */

	return 0;
}

Newer revision:

				struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
	struct i915_vma *vma;

	if (!dev_priv->mm.aliasing_ppgtt ||
	    vm == &dev_priv->mm.aliasing_ppgtt->base)
		vm = &dev_priv->gtt.base;

	/* ... five unchanged rows (the vma lookup loop) skipped by the viewer ... */

	return 0;
}

Finally, i915_gem_obj_to_ggtt(); the newer revision only adds a comment explaining why the WARN should eventually go. Older revision (4560):

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

Newer revision:

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	/* This WARN has probably outlived its usefulness (callers already
	 * WARN if they don't find the GGTT vma they expect). When removing,
	 * remember to remove the pre-check in is_pin_display() as well */
	if (WARN_ON(list_empty(&obj->vma_list)))
		return NULL;

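A sketch of how callers typically consume i915_gem_obj_to_ggtt(). The helper below is hypothetical; drm_mm_node_allocated() is the stock DRM test for whether a vma actually holds address space:

static bool obj_is_bound_in_ggtt(struct drm_i915_gem_object *obj)
{
	/* Use the vma's node only after checking both that the vma exists
	 * and that address space has really been allocated to it.
	 */
	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);

	return vma && drm_mm_node_allocated(&vma->node);
}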