Diff between Rev 3298 and Rev 3480
@@ -116 +116 @@
 	dev_priv->mm.object_count--;
 	dev_priv->mm.object_memory -= size;
 }
 
 static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct completion *x = &dev_priv->error_completion;
-	unsigned long flags;
 	int ret;
 
-	if (!atomic_read(&dev_priv->mm.wedged))
+#define EXIT_COND (!i915_reset_in_progress(error))
+	if (EXIT_COND)
 		return 0;
 #if 0
 	/*
 	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
 	 * userspace. If it takes that long something really bad is going on and
 	 * we should simply try to bail out and fail as gracefully as possible.
 	 */
-	ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+	ret = wait_event_interruptible_timeout(error->reset_queue,
+					       EXIT_COND,
+					       10*HZ);
 	if (ret == 0) {
 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
 		return -EIO;
 	} else if (ret < 0) {
 		return ret;
 	}
-
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		/* GPU is hung, bump the completion count to account for
-		 * the token we just consumed so that we never hit zero and
-		 * end up waiting upon a subsequent completion event that
-		 * will never happen.
-		 */
-		spin_lock_irqsave(&x->wait.lock, flags);
-		x->done++;
-		spin_unlock_irqrestore(&x->wait.lock, flags);
-	}
 #endif
+#undef EXIT_COND
 
 	return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = i915_gem_wait_for_error(dev);
+	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
 	if (ret)
 		return ret;
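
Note: the hunk above replaces the old completion-based wedged check with the reset_counter scheme. A minimal sketch of the two helpers the new code relies on (an assumption based on the upstream kernel of this era; they live in a header, not in this diff):

	/* Assumed helper sketch, not part of this diff.
	 * Bit 0 of reset_counter set: a reset is in progress;
	 * all bits set: the GPU is terminally wedged. */
	#define I915_RESET_IN_PROGRESS_FLAG	1
	#define I915_WEDGED			0xffffffff

	static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
	{
		return unlikely(atomic_read(&error->reset_counter)
				& I915_RESET_IN_PROGRESS_FLAG);
	}

	static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
	{
		return atomic_read(&error->reset_counter) == I915_WEDGED;
	}

The counter advances when a reset starts and again when it completes, so a waiter can detect a reset that happened while it slept by comparing two snapshots of the counter.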
@@ -181 +175 @@
 
 int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_init *args = data;
 
@@ -195 +190 @@
 	/* GEM with user mode setting was never supported on ilk and later. */
 	if (INTEL_INFO(dev)->gen >= 5)
 		return -ENODEV;
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_init_global_gtt(dev, args->gtt_start,
-				 args->gtt_end, args->gtt_end);
+	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
+				  args->gtt_end);
+	dev_priv->gtt.mappable_end = args->gtt_end;
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -219 +215 @@
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
 		if (obj->pin_count)
 			pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
 
-	args->aper_size = dev_priv->mm.gtt_total;
+	args->aper_size = dev_priv->gtt.total;
 	args->aper_available_size = args->aper_size - pinned;
 
 	return 0;
 }
 
+void *i915_gem_object_alloc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return kmalloc(sizeof(struct drm_i915_gem_object), 0);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	kfree(obj);
+}
+
 static int
 i915_gem_create(struct drm_file *file,
@@ -295 +303 @@
 
 	return i915_gem_create(file, dev,
 			       args->size, &args->handle);
 }
 
-static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
-{
-	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-
-	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-		obj->tiling_mode != I915_TILING_NONE;
-}
 
 #if 0
@@ -444 +446 @@
 	char __user *user_data;
 	ssize_t remain;
 	loff_t offset;
 	int shmem_page_offset, page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
-	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
 	struct scatterlist *sg;
 	int i;
 
@@ -506 +507 @@
 						       user_data, page_do_bit17_swizzling,
 						       needs_clflush);
 		if (ret == 0)
 			goto next_page;
 
-		hit_slowpath = 1;
 		mutex_unlock(&dev->struct_mutex);
 
 		if (!prefaulted) {
 
@@ -539 +539 @@
 	}
 
 out:
 	i915_gem_object_unpin_pages(obj);
-
-	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-	}
 
 	return ret;
 }
@@ -886 +880 @@
 
 out:
 	i915_gem_object_unpin_pages(obj);
 
 	if (hit_slowpath) {
-		/* Fixup: Kill any reinstated backing storage pages */
-		if (obj->madv == __I915_MADV_PURGED)
-			i915_gem_object_truncate(obj);
-		/* and flush dirty cachelines in case the object isn't in the cpu write
-		 * domain anymore. */
-		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		/*
+		 * Fixup: Flush cpu caches in case we didn't flush the dirty
+		 * cachelines in-line while writing and the object moved
+		 * out of the cpu write domain while we've dropped the lock.
+		 */
+		if (!needs_clflush_after &&
+		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			i915_gem_clflush_object(obj);
 			i915_gem_chipset_flush(dev);
@@ -916 +911 @@
 {
 	struct drm_i915_gem_pwrite *args = data;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
+	if(args->handle == -2)
+	{
+		printf("%s handle %d\n", __FUNCTION__, args->handle);
+		return 0;
+	}
 
 	if (args->size == 0)
 		return 0;
 
@@ -978 +979 @@
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
 int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
 		     bool interruptible)
 {
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		struct completion *x = &dev_priv->error_completion;
-		bool recovery_complete;
-		unsigned long flags;
-
-		/* Give the error handler a chance to run. */
-		spin_lock_irqsave(&x->wait.lock, flags);
-		recovery_complete = x->done > 0;
-		spin_unlock_irqrestore(&x->wait.lock, flags);
-
+	if (i915_reset_in_progress(error)) {
 		/* Non-interruptible callers can't handle -EAGAIN, hence return
 		 * -EIO unconditionally for these. */
 		if (!interruptible)
 			return -EIO;
 
-		/* Recovery complete, but still wedged means reset failure. */
-		if (recovery_complete)
+		/* Recovery complete, but the reset failed ... */
+		if (i915_terminally_wedged(error))
 			return -EIO;
@@ -1028 +1020 @@
 
 /**
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
  * @seqno: duh!
+ * @reset_counter: reset sequence associated with the given seqno
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
  * Returns 0 if the seqno was found within the alloted time. Else returns the
  * errno with remaining time filled in timeout argument.
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+			unsigned reset_counter,
 			bool interruptible, struct timespec *timeout)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct timespec before, now, wait_time={1,0};
@@ -1064 +1065 @@
 	/* Record current time in case interrupted by signal, or wedged * */
 	getrawmonotonic(&before);
 
 #define EXIT_COND \
 	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-	 atomic_read(&dev_priv->mm.wedged))
+	 i915_reset_in_progress(&dev_priv->gpu_error) || \
+	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
 	do {
 		if (interruptible)
 			end = wait_event_interruptible_timeout(ring->irq_queue,
 							       EXIT_COND,
 							       timeout_jiffies);
 		else
 			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
 						 timeout_jiffies);
 
-		ret = i915_gem_check_wedge(dev_priv, interruptible);
+		/* We need to check whether any gpu reset happened in between
+		 * the caller grabbing the seqno and now ... */
+		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+			end = -EAGAIN;
+
+		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
+		 * gone. */
+		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 		if (ret)
 			end = ret;
@@ -1120 +1129 @@
 	int ret;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(seqno == 0);
 
-	ret = i915_gem_check_wedge(dev_priv, interruptible);
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 	if (ret)
 		return ret;
 
 	ret = i915_gem_check_olr(ring, seqno);
 	if (ret)
 		return ret;
 
-	return __wait_seqno(ring, seqno, interruptible, NULL);
+	return __wait_seqno(ring, seqno,
+			    atomic_read(&dev_priv->gpu_error.reset_counter),
+			    interruptible, NULL);
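
Note: the reset_counter parameter threaded through __wait_seqno above is what lets waiters drop struct_mutex safely. A sketch of the locked caller pattern (illustrative only; modeled on the wait paths visible elsewhere in this diff, with dev, obj, ring and timeout assumed to be in scope):

	/* Snapshot the reset counter while the lock still protects the
	 * seqno, then sleep unlocked.  __wait_seqno returns -EAGAIN if
	 * the counter moved, instead of waiting forever on a seqno that
	 * a reset has thrown away. */
	mutex_lock(&dev->struct_mutex);
	seqno = obj->last_read_seqno;
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);

	ret = __wait_seqno(ring, seqno, reset_counter, true, &timeout);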
@@ -1175 +1186 @@
 					    bool readonly)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = obj->ring;
+	unsigned reset_counter;
 	u32 seqno;
 	int ret;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!dev_priv->mm.interruptible);
 
 	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
 	if (seqno == 0)
 		return 0;
 
-	ret = i915_gem_check_wedge(dev_priv, true);
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
 	if (ret)
 		return ret;
 
 	ret = i915_gem_check_olr(ring, seqno);
 	if (ret)
 		return ret;
 
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
@@ -1225 +1238 @@
 	struct drm_i915_gem_object *obj;
 	uint32_t read_domains = args->read_domains;
 	uint32_t write_domain = args->write_domain;
 	int ret;
 
+
+	if(args->handle == -2)
+	{
+		printf("%s handle %d\n", __FUNCTION__, args->handle);
+		return 0;
+	}
 
 	/* Only handle setting domains to types used by the CPU. */
 	if (write_domain & I915_GEM_GPU_DOMAINS)
 		return -EINVAL;
@@ -1296 +1316 @@
 {
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_gem_object *obj;
 	unsigned long addr = 0;
 
+	if(args->handle == -2)
+	{
+		printf("%s handle %d\n", __FUNCTION__, args->handle);
+		return 0;
+	}
 
 	obj = drm_gem_object_lookup(dev, file, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
@@ -1362 +1388 @@
 //	obj->base.size, 1);
 
 	obj->fault_mappable = false;
 }
 
-static uint32_t
+uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
 
@@ -1390 +1416 @@
  * @obj: object to check
  *
  * Return the required GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-static uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev,
-			   uint32_t size,
-			   int tiling_mode)
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+			   int tiling_mode, bool fenced)
 {
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_INFO(dev)->gen >= 4 ||
+	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
 	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
 	/*
@@ -1439 +1464 @@
 	 * the power-of-tile object size.
 	 */
 	return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
 
+int
+i915_gem_mmap_gtt(struct drm_file *file,
+		  struct drm_device *dev,
+		  uint32_t handle,
+		  uint64_t *offset)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	unsigned long pfn;
+	char *mem, *ptr;
+	int ret;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+	if (&obj->base == NULL) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	if (obj->base.size > dev_priv->gtt.mappable_end) {
+		ret = -E2BIG;
+		goto out;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED) {
+		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	/* Now bind it into the GTT if needed */
+	ret = i915_gem_object_pin(obj, 0, true, false);
+	if (ret)
+		goto out;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+	if (ret)
+		goto unpin;
+
+	ret = i915_gem_object_get_fence(obj);
+	if (ret)
+		goto unpin;
+
+	obj->fault_mappable = true;
+
+	pfn = dev_priv->gtt.mappable_base + obj->gtt_offset;
+
+	/* Finally, remap it using the new GTT offset */
+
+	mem = UserAlloc(obj->base.size);
+	if(unlikely(mem == NULL))
+	{
+		ret = -ENOMEM;
+		goto unpin;
+	}
+
+	for(ptr = mem; ptr < mem + obj->base.size; ptr+= 4096, pfn+= 4096)
+		MapPage(ptr, pfn, PG_SHARED|PG_UW);
+
+unpin:
+	i915_gem_object_unpin(obj);
+
+
+	*offset = (u64)mem;
+
+out:
+	drm_gem_object_unreference(&obj->base);
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file)
+{
+	struct drm_i915_gem_mmap_gtt *args = data;
+
+	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
 
 /* Immediately discard the backing storage */
 static void
 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
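
Note: unlike the upstream driver, this port's i915_gem_mmap_gtt above hands back a ready-to-use linear pointer rather than a fake mmap offset: it pins the object and maps each aperture page into the caller with MapPage. A hypothetical usage sketch (names such as bo_handle and bo_size are assumptions for illustration, not part of this diff):

	struct drm_i915_gem_mmap_gtt args = { .handle = bo_handle };

	/* Assumed caller sketch: on this port args.offset comes back as a
	 * usable pointer, so the buffer can be written through the GTT
	 * immediately, without a separate mmap() step. */
	if (i915_gem_mmap_gtt(file, dev, args.handle, &args.offset) == 0)
		memset((void *)(uintptr_t)args.offset, 0, bo_size);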
@@ -1502 +1625 @@
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
 }
 
-static int
+int
 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
@@ -1667 +1790 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
 	BUG_ON(!obj->active);
 
-	if (obj->pin_count) /* are we a framebuffer? */
-		intel_mark_fb_idle(obj);
-
 	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
 	WARN_ON(i915_verify_lists(dev));
 }
@@ -1691 +1811 @@
 
 static int
-i915_gem_handle_seqno_wrap(struct drm_device *dev)
+i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
 	int ret, i, j;
 
-	/* The hardware uses various monotonic 32-bit counters, if we
-	 * detect that they will wraparound we need to idle the GPU
-	 * and reset those counters.
-	 */
-	ret = 0;
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
-			ret |= ring->sync_seqno[j] != 0;
-	}
-	if (ret == 0)
-		return ret;
-
-	ret = i915_gpu_idle(dev);
-	if (ret)
-		return ret;
-
-	i915_gem_retire_requests(dev);
-	for_each_ring(ring, dev_priv, i) {
+	/* Carefully retire all requests without writing to the rings */
+	for_each_ring(ring, dev_priv, i) {
+		ret = intel_ring_idle(ring);
+		if (ret)
+			return ret;
+	}
+	i915_gem_retire_requests(dev);
+
+	/* Finally reset hw state */
+	for_each_ring(ring, dev_priv, i) {
+		intel_ring_init_seqno(ring, seqno);
+
 		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
 			ring->sync_seqno[j] = 0;
 	}
 
 	return 0;
 }
 
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (seqno == 0)
+		return -EINVAL;
+
+	/* HWS page needs to be set less than what we
+	 * will inject to ring
+	 */
+	ret = i915_gem_init_seqno(dev, seqno - 1);
+	if (ret)
+		return ret;
+
+	/* Carefully set the last_seqno value so that wrap
+	 * detection still works
+	 */
+	dev_priv->next_seqno = seqno;
+	dev_priv->last_seqno = seqno - 1;
+	if (dev_priv->last_seqno == 0)
+		dev_priv->last_seqno--;
+
+	return 0;
+}
+
 int
 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* reserve 0 for non-seqno */
 	if (dev_priv->next_seqno == 0) {
-		int ret = i915_gem_handle_seqno_wrap(dev);
+		int ret = i915_gem_init_seqno(dev, 0);
 		if (ret)
 			return ret;
 
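
Note: the wrap handling above works because seqno comparisons are done in modular 32-bit arithmetic. A sketch of the comparison helper this logic assumes (it is defined outside this diff; shown as an assumption based on the upstream kernel of this era):

	/* Treating the difference as signed makes "passed" robust across
	 * the u32 wraparound, which is why i915_gem_set_seqno stores
	 * last_seqno = seqno - 1 and steps over 0. */
	static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}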
@@ -2124 +2264 @@
 
 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 {
 	u32 old_write_domain, old_read_domains;
 
-	/* Act a barrier for all accesses through the GTT */
-	mb();
-
 	/* Force a pagefault for domain tracking on next user access */
 //	i915_gem_release_mmap(obj);
 
 	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
 		return;
 
+	/* Wait for any direct GTT access to complete */
+	mb();
 
@@ -2151 +2291 @@
 */
 int
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-	int ret = 0;
+	int ret;
 
 	if(obj == get_fb_obj())
 		return 0;
@@ -2221 +2361 @@
 	}
 
 	return 0;
 }
 
-static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
+static void i965_write_fence_reg(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int fence_reg;
+	int fence_pitch_shift;
 	uint64_t val;
 
+	if (INTEL_INFO(dev)->gen >= 6) {
+		fence_reg = FENCE_REG_SANDYBRIDGE_0;
+		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+	} else {
+		fence_reg = FENCE_REG_965_0;
+		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+	}
+
 	if (obj) {
 		u32 size = obj->gtt_space->size;
 
 		val = (uint64_t)((obj->gtt_offset + size - 4096) &
 				 0xfffff000) << 32;
 		val |= obj->gtt_offset & 0xfffff000;
-		val |= (uint64_t)((obj->stride / 128) - 1) <<
-			SANDYBRIDGE_FENCE_PITCH_SHIFT;
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-		val |= I965_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
-	POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
-}
-
-static void i965_write_fence_reg(struct drm_device *dev, int reg,
-				 struct drm_i915_gem_object *obj)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint64_t val;
-
-	if (obj) {
-		u32 size = obj->gtt_space->size;
-
-		val = (uint64_t)((obj->gtt_offset + size - 4096) &
-				 0xfffff000) << 32;
-		val |= obj->gtt_offset & 0xfffff000;
-		val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 		val |= I965_FENCE_REG_VALID;
 	} else
 		val = 0;
 
+	fence_reg += reg * 8;
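
Note: the merged helper above packs the fence as an end page, a start page, and a pitch field whose position depends on the generation. An illustrative standalone version of that packing (the example numbers are assumptions; the real helper additionally ORs in the Y-tiling bit when needed):

	/* For a 1 MiB object at GTT offset 0x00100000 with a 512-byte
	 * stride: end page = 0x001ff000, start page = 0x00100000,
	 * pitch field = 512/128 - 1 = 3. */
	static uint64_t pack_i965_fence(uint32_t offset, uint32_t size,
					uint32_t stride, int pitch_shift)
	{
		uint64_t val;

		val  = (uint64_t)((offset + size - 4096) & 0xfffff000) << 32;
		val |= offset & 0xfffff000;
		val |= (uint64_t)((stride / 128) - 1) << pitch_shift;
		return val | I965_FENCE_REG_VALID;
	}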
|
Line 2345... | Line 2471... | ||
2345 | 2471 | ||
2346 | I915_WRITE(FENCE_REG_830_0 + reg * 4, val); |
2472 | I915_WRITE(FENCE_REG_830_0 + reg * 4, val); |
2347 | POSTING_READ(FENCE_REG_830_0 + reg * 4); |
2473 | POSTING_READ(FENCE_REG_830_0 + reg * 4); |
Line -... | Line 2474... | ||
- | 2474 | } |
|
- | 2475 | ||
- | 2476 | inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) |
|
- | 2477 | { |
|
- | 2478 | return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT; |
|
2348 | } |
2479 | } |
2349 | 2480 | ||
2350 | static void i915_gem_write_fence(struct drm_device *dev, int reg, |
2481 | static void i915_gem_write_fence(struct drm_device *dev, int reg, |
- | 2482 | struct drm_i915_gem_object *obj) |
|
- | 2483 | { |
|
- | 2484 | struct drm_i915_private *dev_priv = dev->dev_private; |
|
- | 2485 | ||
- | 2486 | /* Ensure that all CPU reads are completed before installing a fence |
|
- | 2487 | * and all writes before removing the fence. |
|
- | 2488 | */ |
|
- | 2489 | if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) |
|
2351 | struct drm_i915_gem_object *obj) |
2490 | mb(); |
2352 | { |
2491 | |
2353 | switch (INTEL_INFO(dev)->gen) { |
2492 | switch (INTEL_INFO(dev)->gen) { |
2354 | case 7: |
2493 | case 7: |
2355 | case 6: sandybridge_write_fence_reg(dev, reg, obj); break; |
2494 | case 6: |
2356 | case 5: |
2495 | case 5: |
2357 | case 4: i965_write_fence_reg(dev, reg, obj); break; |
2496 | case 4: i965_write_fence_reg(dev, reg, obj); break; |
2358 | case 3: i915_write_fence_reg(dev, reg, obj); break; |
2497 | case 3: i915_write_fence_reg(dev, reg, obj); break; |
2359 | case 2: i830_write_fence_reg(dev, reg, obj); break; |
2498 | case 2: i830_write_fence_reg(dev, reg, obj); break; |
- | 2499 | default: BUG(); |
|
- | 2500 | } |
|
- | 2501 | ||
- | 2502 | /* And similarly be paranoid that no direct access to this region |
|
- | 2503 | * is reordered to before the fence is installed. |
|
- | 2504 | */ |
|
2360 | default: break; |
2505 | if (i915_gem_object_needs_mb(obj)) |
Line 2361... | Line 2506... | ||
2361 | } |
2506 | mb(); |
2362 | } |
2507 | } |
2363 | 2508 | ||
Line 2386... | Line 2531... | ||
2386 | list_del_init(&fence->lru_list); |
2531 | list_del_init(&fence->lru_list); |
2387 | } |
2532 | } |
2388 | } |
2533 | } |
Line 2389... | Line 2534... | ||
2389 | 2534 | ||
2390 | static int |
2535 | static int |
2391 | i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) |
2536 | i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) |
2392 | { |
2537 | { |
2393 | if (obj->last_fenced_seqno) { |
2538 | if (obj->last_fenced_seqno) { |
2394 | int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); |
2539 | int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); |
2395 | if (ret) |
2540 | if (ret) |
Line 2396... | Line 2541... | ||
2396 | return ret; |
2541 | return ret; |
2397 | 2542 | ||
Line 2398... | Line -... | ||
2398 | obj->last_fenced_seqno = 0; |
- | |
2399 | } |
- | |
2400 | - | ||
2401 | /* Ensure that all CPU reads are completed before installing a fence |
- | |
2402 | * and all writes before removing the fence. |
- | |
2403 | */ |
- | |
2404 | if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) |
2543 | obj->last_fenced_seqno = 0; |
2405 | mb(); |
2544 | } |
2406 | 2545 | ||
Line 2407... | Line 2546... | ||
2407 | obj->fenced_gpu_access = false; |
2546 | obj->fenced_gpu_access = false; |
2408 | return 0; |
2547 | return 0; |
2409 | } |
2548 | } |
2410 | 2549 | ||
2411 | int |
2550 | int |
Line 2412... | Line 2551... | ||
2412 | i915_gem_object_put_fence(struct drm_i915_gem_object *obj) |
2551 | i915_gem_object_put_fence(struct drm_i915_gem_object *obj) |
2413 | { |
2552 | { |
2414 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2553 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
Line 2415... | Line 2554... | ||
2415 | int ret; |
2554 | int ret; |
2416 | 2555 | ||
Line 2486... | Line 2625... | ||
2486 | 2625 | ||
2487 | /* Have we updated the tiling parameters upon the object and so |
2626 | /* Have we updated the tiling parameters upon the object and so |
2488 | * will need to serialise the write to the associated fence register? |
2627 | * will need to serialise the write to the associated fence register? |
2489 | */ |
2628 | */ |
2490 | if (obj->fence_dirty) { |
2629 | if (obj->fence_dirty) { |
2491 | ret = i915_gem_object_flush_fence(obj); |
2630 | ret = i915_gem_object_wait_fence(obj); |
2492 | if (ret) |
2631 | if (ret) |
2493 | return ret; |
2632 | return ret; |
Line 2494... | Line 2633... | ||
2494 | } |
2633 | } |
Line 2507... | Line 2646... | ||
2507 | return -EDEADLK; |
2646 | return -EDEADLK; |
Line 2508... | Line 2647... | ||
2508 | 2647 | ||
2509 | if (reg->obj) { |
2648 | if (reg->obj) { |
Line 2510... | Line 2649... | ||
2510 | struct drm_i915_gem_object *old = reg->obj; |
2649 | struct drm_i915_gem_object *old = reg->obj; |
2511 | 2650 | ||
2512 | ret = i915_gem_object_flush_fence(old); |
2651 | ret = i915_gem_object_wait_fence(old); |
Line 2513... | Line 2652... | ||
2513 | if (ret) |
2652 | if (ret) |
2514 | return ret; |
2653 | return ret; |
Line 2530... | Line 2669... | ||
2530 | { |
2669 | { |
2531 | struct drm_mm_node *other; |
2670 | struct drm_mm_node *other; |
Line 2532... | Line 2671... | ||
2532 | 2671 | ||
2533 | /* On non-LLC machines we have to be careful when putting differing |
2672 | /* On non-LLC machines we have to be careful when putting differing |
2534 | * types of snoopable memory together to avoid the prefetcher |
2673 | * types of snoopable memory together to avoid the prefetcher |
2535 | * crossing memory domains and dieing. |
2674 | * crossing memory domains and dying. |
2536 | */ |
2675 | */ |
2537 | if (HAS_LLC(dev)) |
2676 | if (HAS_LLC(dev)) |
Line 2538... | Line 2677... | ||
2538 | return true; |
2677 | return true; |
Line 2608... | Line 2747... | ||
2608 | struct drm_mm_node *node; |
2747 | struct drm_mm_node *node; |
2609 | u32 size, fence_size, fence_alignment, unfenced_alignment; |
2748 | u32 size, fence_size, fence_alignment, unfenced_alignment; |
2610 | bool mappable, fenceable; |
2749 | bool mappable, fenceable; |
2611 | int ret; |
2750 | int ret; |
Line 2612... | Line -... | ||
2612 | - | ||
2613 | if (obj->madv != I915_MADV_WILLNEED) { |
- | |
2614 | DRM_ERROR("Attempting to bind a purgeable object\n"); |
- | |
2615 | return -EINVAL; |
- | |
2616 | } |
- | |
2617 | 2751 | ||
2618 | fence_size = i915_gem_get_gtt_size(dev, |
2752 | fence_size = i915_gem_get_gtt_size(dev, |
2619 | obj->base.size, |
2753 | obj->base.size, |
2620 | obj->tiling_mode); |
2754 | obj->tiling_mode); |
2621 | fence_alignment = i915_gem_get_gtt_alignment(dev, |
2755 | fence_alignment = i915_gem_get_gtt_alignment(dev, |
2622 | obj->base.size, |
2756 | obj->base.size, |
2623 | obj->tiling_mode); |
2757 | obj->tiling_mode, true); |
2624 | unfenced_alignment = |
2758 | unfenced_alignment = |
2625 | i915_gem_get_unfenced_gtt_alignment(dev, |
2759 | i915_gem_get_gtt_alignment(dev, |
2626 | obj->base.size, |
2760 | obj->base.size, |
Line 2627... | Line 2761... | ||
2627 | obj->tiling_mode); |
2761 | obj->tiling_mode, false); |
2628 | 2762 | ||
2629 | if (alignment == 0) |
2763 | if (alignment == 0) |
2630 | alignment = map_and_fenceable ? fence_alignment : |
2764 | alignment = map_and_fenceable ? fence_alignment : |
Line 2638... | Line 2772... | ||
2638 | 2772 | ||
2639 | /* If the object is bigger than the entire aperture, reject it early |
2773 | /* If the object is bigger than the entire aperture, reject it early |
2640 | * before evicting everything in a vain attempt to find space. |
2774 | * before evicting everything in a vain attempt to find space. |
2641 | */ |
2775 | */ |
2642 | if (obj->base.size > |
2776 | if (obj->base.size > |
2643 | (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { |
2777 | (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) { |
2644 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
2778 | DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
2645 | return -E2BIG; |
2779 | return -E2BIG; |
Line 2646... | Line 2780... | ||
2646 | } |
2780 | } |
Line 2659... | Line 2793... | ||
2659 | 2793 | ||
2660 | search_free: |
2794 | search_free: |
2661 | if (map_and_fenceable) |
2795 | if (map_and_fenceable) |
2662 | ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, |
2796 | ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, |
2663 | size, alignment, obj->cache_level, |
2797 | size, alignment, obj->cache_level, |
2664 | 0, dev_priv->mm.gtt_mappable_end); |
2798 | 0, dev_priv->gtt.mappable_end); |
2665 | else |
2799 | else |
2666 | ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, |
2800 | ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, |
2667 | size, alignment, obj->cache_level); |
2801 | size, alignment, obj->cache_level); |
Line 2693... | Line 2827... | ||
2693 | fenceable = |
2827 | fenceable = |
2694 | node->size == fence_size && |
2828 | node->size == fence_size && |
2695 | (node->start & (fence_alignment - 1)) == 0; |
2829 | (node->start & (fence_alignment - 1)) == 0; |
Line 2696... | Line 2830... | ||
2696 | 2830 | ||
2697 | mappable = |
2831 | mappable = |
Line 2698... | Line 2832... | ||
2698 | obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; |
2832 | obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; |
Line 2699... | Line 2833... | ||
2699 | 2833 | ||
2700 | obj->map_and_fenceable = mappable && fenceable; |
2834 | obj->map_and_fenceable = mappable && fenceable; |
Line 2713... | Line 2847... | ||
2713 | * again at bind time. |
2847 | * again at bind time. |
2714 | */ |
2848 | */ |
2715 | if (obj->pages == NULL) |
2849 | if (obj->pages == NULL) |
2716 | return; |
2850 | return; |
Line -... | Line 2851... | ||
- | 2851 | ||
- | 2852 | /* |
|
- | 2853 | * Stolen memory is always coherent with the GPU as it is explicitly |
|
- | 2854 | * marked as wc by the system, or the system is cache-coherent. |
|
- | 2855 | */ |
|
- | 2856 | if (obj->stolen) |
|
- | 2857 | return; |
|
2717 | 2858 | ||
2718 | /* If the GPU is snooping the contents of the CPU cache, |
2859 | /* If the GPU is snooping the contents of the CPU cache, |
2719 | * we do not need to manually clear the CPU cache lines. However, |
2860 | * we do not need to manually clear the CPU cache lines. However, |
2720 | * the caches are only snooped when the render cache is |
2861 | * the caches are only snooped when the render cache is |
2721 | * flushed/invalidated. As we always have to emit invalidations |
2862 | * flushed/invalidated. As we always have to emit invalidations |
Line 2846... | Line 2987... | ||
2846 | if (ret) |
2987 | if (ret) |
2847 | return ret; |
2988 | return ret; |
Line 2848... | Line 2989... | ||
2848 | 2989 | ||
Line -... | Line 2990... | ||
- | 2990 | i915_gem_object_flush_cpu_write_domain(obj); |
|
- | 2991 | ||
- | 2992 | /* Serialise direct access to this object with the barriers for |
|
- | 2993 | * coherent writes from the GPU, by effectively invalidating the |
|
- | 2994 | * GTT domain upon first access. |
|
- | 2995 | */ |
|
- | 2996 | if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) |
|
2849 | i915_gem_object_flush_cpu_write_domain(obj); |
2997 | mb(); |
2850 | 2998 | ||
Line 2851... | Line 2999... | ||
2851 | old_write_domain = obj->base.write_domain; |
2999 | old_write_domain = obj->base.write_domain; |
2852 | old_read_domains = obj->base.read_domains; |
3000 | old_read_domains = obj->base.read_domains; |
Line 2953... | Line 3101... | ||
2953 | { |
3101 | { |
2954 | struct drm_i915_gem_caching *args = data; |
3102 | struct drm_i915_gem_caching *args = data; |
2955 | struct drm_i915_gem_object *obj; |
3103 | struct drm_i915_gem_object *obj; |
2956 | int ret; |
3104 | int ret; |
Line -... | Line 3105... | ||
- | 3105 | ||
- | 3106 | if(args->handle == -2) |
|
- | 3107 | { |
|
- | 3108 | printf("%s handle %d\n", __FUNCTION__, args->handle); |
|
- | 3109 | return 0; |
|
- | 3110 | } |
|
2957 | 3111 | ||
2958 | ret = i915_mutex_lock_interruptible(dev); |
3112 | ret = i915_mutex_lock_interruptible(dev); |
2959 | if (ret) |
3113 | if (ret) |
Line 2960... | Line 3114... | ||
2960 | return ret; |
3114 | return ret; |
Line 2979... | Line 3133... | ||
2979 | struct drm_i915_gem_caching *args = data; |
3133 | struct drm_i915_gem_caching *args = data; |
2980 | struct drm_i915_gem_object *obj; |
3134 | struct drm_i915_gem_object *obj; |
2981 | enum i915_cache_level level; |
3135 | enum i915_cache_level level; |
2982 | int ret; |
3136 | int ret; |
Line -... | Line 3137... | ||
- | 3137 | ||
- | 3138 | if(args->handle == -2) |
|
- | 3139 | { |
|
- | 3140 | printf("%s handle %d\n", __FUNCTION__, args->handle); |
|
- | 3141 | return 0; |
|
- | 3142 | } |
|
2983 | 3143 | ||
2984 | switch (args->caching) { |
3144 | switch (args->caching) { |
2985 | case I915_CACHING_NONE: |
3145 | case I915_CACHING_NONE: |
2986 | level = I915_CACHE_NONE; |
3146 | level = I915_CACHE_NONE; |
2987 | break; |
3147 | break; |
Line 3152... | Line 3312... | ||
3152 | struct drm_i915_private *dev_priv = dev->dev_private; |
3312 | struct drm_i915_private *dev_priv = dev->dev_private; |
3153 | struct drm_i915_file_private *file_priv = file->driver_priv; |
3313 | struct drm_i915_file_private *file_priv = file->driver_priv; |
3154 | unsigned long recent_enough = GetTimerTicks() - msecs_to_jiffies(20); |
3314 | unsigned long recent_enough = GetTimerTicks() - msecs_to_jiffies(20); |
3155 | struct drm_i915_gem_request *request; |
3315 | struct drm_i915_gem_request *request; |
3156 | struct intel_ring_buffer *ring = NULL; |
3316 | struct intel_ring_buffer *ring = NULL; |
- | 3317 | unsigned reset_counter; |
|
3157 | u32 seqno = 0; |
3318 | u32 seqno = 0; |
3158 | int ret; |
3319 | int ret; |
Line 3159... | Line 3320... | ||
3159 | 3320 | ||
- | 3321 | ret = i915_gem_wait_for_error(&dev_priv->gpu_error); |
|
3160 | if (atomic_read(&dev_priv->mm.wedged)) |
3322 | if (ret) |
- | 3323 | return ret; |
|
- | 3324 | ||
- | 3325 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); |
|
- | 3326 | if (ret) |
|
Line 3161... | Line 3327... | ||
3161 | return -EIO; |
3327 | return ret; |
3162 | 3328 | ||
3163 | spin_lock(&file_priv->mm.lock); |
3329 | spin_lock(&file_priv->mm.lock); |
3164 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { |
3330 | list_for_each_entry(request, &file_priv->mm.request_list, client_list) { |
Line 3165... | Line 3331... | ||
3165 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
3331 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
3166 | break; |
3332 | break; |
3167 | 3333 | ||
- | 3334 | ring = request->ring; |
|
3168 | ring = request->ring; |
3335 | seqno = request->seqno; |
Line 3169... | Line 3336... | ||
3169 | seqno = request->seqno; |
3336 | } |
3170 | } |
3337 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
Line 3171... | Line 3338... | ||
3171 | spin_unlock(&file_priv->mm.lock); |
3338 | spin_unlock(&file_priv->mm.lock); |
3172 | 3339 | ||
3173 | if (seqno == 0) |
3340 | if (seqno == 0) |
Line 3174... | Line 3341... | ||
3174 | return 0; |
3341 | return 0; |
3175 | 3342 | ||
Line 3245... | Line 3412... | ||
3245 | { |
3412 | { |
3246 | struct drm_i915_gem_pin *args = data; |
3413 | struct drm_i915_gem_pin *args = data; |
3247 | struct drm_i915_gem_object *obj; |
3414 | struct drm_i915_gem_object *obj; |
3248 | int ret; |
3415 | int ret; |
Line -... | Line 3416... | ||
- | 3416 | ||
- | 3417 | if(args->handle == -2) |
|
- | 3418 | { |
|
- | 3419 | printf("%s handle %d\n", __FUNCTION__, args->handle); |
|
- | 3420 | return 0; |
|
- | 3421 | } |
|
3249 | 3422 | ||
3250 | ret = i915_mutex_lock_interruptible(dev); |
3423 | ret = i915_mutex_lock_interruptible(dev); |
3251 | if (ret) |
3424 | if (ret) |
Line 3252... | Line 3425... | ||
3252 | return ret; |
3425 | return ret; |
Line 3342... | Line 3515... | ||
3342 | 3515 | ||
3343 | ret = i915_mutex_lock_interruptible(dev); |
3516 | ret = i915_mutex_lock_interruptible(dev); |
3344 | if (ret) |
3517 | if (ret) |
Line -... | Line 3518... | ||
- | 3518 | return ret; |
|
- | 3519 | ||
- | 3520 | if(args->handle == -2) |
|
- | 3521 | { |
|
- | 3522 | obj = get_fb_obj(); |
|
- | 3523 | drm_gem_object_reference(&obj->base); |
|
3345 | return ret; |
3524 | } |
3346 | 3525 | else |
|
3347 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
3526 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
3348 | if (&obj->base == NULL) { |
3527 | if (&obj->base == NULL) { |
3349 | ret = -ENOENT; |
3528 | ret = -ENOENT; |
Line 3452... | Line 3631... | ||
3452 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
3631 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
3453 | size_t size) |
3632 | size_t size) |
3454 | { |
3633 | { |
3455 | struct drm_i915_gem_object *obj; |
3634 | struct drm_i915_gem_object *obj; |
3456 | struct address_space *mapping; |
3635 | struct address_space *mapping; |
3457 | u32 mask; |
3636 | gfp_t mask; |
Line 3458... | Line 3637... | ||
3458 | 3637 | ||
3459 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
3638 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
3460 | if (obj == NULL) |
3639 | if (obj == NULL) |
Line 3563... | Line 3742... | ||
3563 | mutex_unlock(&dev->struct_mutex); |
3742 | mutex_unlock(&dev->struct_mutex); |
3564 | return ret; |
3743 | return ret; |
3565 | } |
3744 | } |
3566 | i915_gem_retire_requests(dev); |
3745 | i915_gem_retire_requests(dev); |
Line -... | Line 3746... | ||
- | 3746 | ||
- | 3747 | /* Under UMS, be paranoid and evict. */ |
|
- | 3748 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
|
- | 3749 | i915_gem_evict_everything(dev); |
|
3567 | 3750 | ||
Line 3568... | Line 3751... | ||
3568 | i915_gem_reset_fences(dev); |
3751 | i915_gem_reset_fences(dev); |
3569 | 3752 | ||
3570 | /* Hack! Don't let anybody do execbuf while we don't control the chip. |
3753 | /* Hack! Don't let anybody do execbuf while we don't control the chip. |
3571 | * We need to replace this with a semaphore, or something. |
3754 | * We need to replace this with a semaphore, or something. |
3572 | * And not confound mm.suspended! |
3755 | * And not confound mm.suspended! |
3573 | */ |
3756 | */ |
Line 3574... | Line 3757... | ||
3574 | dev_priv->mm.suspended = 1; |
3757 | dev_priv->mm.suspended = 1; |
3575 | del_timer_sync(&dev_priv->hangcheck_timer); |
3758 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
Line 3576... | Line 3759... | ||
3576 | 3759 | ||
Line 3590... | Line 3773... | ||
3590 | { |
3773 | { |
3591 | drm_i915_private_t *dev_priv = dev->dev_private; |
3774 | drm_i915_private_t *dev_priv = dev->dev_private; |
3592 | u32 misccpctl; |
3775 | u32 misccpctl; |
3593 | int i; |
3776 | int i; |
Line 3594... | Line 3777... | ||
3594 | 3777 | ||
3595 | if (!IS_IVYBRIDGE(dev)) |
3778 | if (!HAS_L3_GPU_CACHE(dev)) |
Line 3596... | Line 3779... | ||
3596 | return; |
3779 | return; |
3597 | 3780 | ||
Line 3633... | Line 3816... | ||
3633 | return; |
3816 | return; |
Line 3634... | Line 3817... | ||
3634 | 3817 | ||
3635 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); |
3818 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); |
3636 | if (IS_GEN6(dev)) |
3819 | if (IS_GEN6(dev)) |
3637 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); |
3820 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); |
3638 | else |
3821 | else if (IS_GEN7(dev)) |
- | 3822 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); |
|
- | 3823 | else |
|
3639 | I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); |
3824 | BUG(); |
Line 3640... | Line 3825... | ||
3640 | } |
3825 | } |
3641 | 3826 | ||
3642 | static bool |
3827 | static bool |
@@ -3653 +3838 @@ (ring setup pulled out of i915_gem_init_hw into its own helper)
 	}
 
 	return true;
 }
 
-int
-i915_gem_init_hw(struct drm_device *dev)
+static int i915_gem_init_rings(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
-
-	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
-		return -EIO;
-
-	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
-		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
-
-	i915_gem_l3_remap(dev);
-
-	i915_gem_init_swizzling(dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
 	ret = intel_init_render_ring_buffer(dev);
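The new helper keeps only the ring bring-up; the hardware workarounds stay in i915_gem_init_hw, which now calls it. Its error handling, completed in the next hunk, is the kernel's standard goto-unwind idiom; a generic sketch under hypothetical init_a()/init_b() names (not from this driver):

	/* Each failure jumps to a label that tears down only what has
	 * already succeeded, in reverse order of construction. */
	int setup_both(void)
	{
		int ret;

		ret = init_a();
		if (ret)
			return ret;

		ret = init_b();
		if (ret)
			goto cleanup_a;

		return 0;

	cleanup_a:
		teardown_a();
		return ret;
	}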
@@ -3685 +3859 @@ (i915_gem_init_rings: seed the seqno near wrap, unwind on failure)
 		ret = intel_init_blt_ring_buffer(dev);
 		if (ret)
 			goto cleanup_bsd_ring;
 	}
 
-	dev_priv->next_seqno = 1;
-
-	/*
-	 * XXX: There was some w/a described somewhere suggesting loading
-	 * contexts before PPGTT.
-	 */
-	i915_gem_context_init(dev);
-	i915_gem_init_ppgtt(dev);
+	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
+	if (ret)
+		goto cleanup_blt_ring;
 
 	return 0;
 
+cleanup_blt_ring:
+	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
 cleanup_bsd_ring:
 	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
 cleanup_render_ring:
 	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+
 	return ret;
 }
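Seeding the seqno just below the 32-bit wrap point, rather than at 1, is a deliberate stress test; the rationale below is assumed from the upstream change that introduced i915_gem_set_seqno, not stated in this hunk:

	/* Starting ~0x1000 requests short of wrap means wraparound is
	 * exercised shortly after every init, instead of only after
	 * ~4 billion requests, so wrap-handling bugs surface quickly. */
	u32 seed = (u32)~0 - 0x1000;	/* 0xffffefff: ~4096 requests to wrap */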
@@ -3707 +3879 @@ (intel_enable_ppgtt drops out of this path; i915_gem_init_hw is rebuilt around the ring helper)
 
-static bool
-intel_enable_ppgtt(struct drm_device *dev)
-{
-	if (i915_enable_ppgtt >= 0)
-		return i915_enable_ppgtt;
-
-#ifdef CONFIG_INTEL_IOMMU
-	/* Disable ppgtt on SNB if VT-d is on. */
-	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-		return false;
-#endif
-
-	return true;
-}
-
-#define LFB_SIZE 0xC00000
-
-int i915_gem_init(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long gtt_size, mappable_size;
-	int ret;
-
-	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
-	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
-	mutex_lock(&dev->struct_mutex);
-	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
-		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
-		 * aperture accordingly when using aliasing ppgtt. */
-		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
-
-		i915_gem_init_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE);
-
-		ret = i915_gem_init_aliasing_ppgtt(dev);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	} else {
-		/* Let GEM Manage all of the aperture.
-		 *
-		 * However, leave one page at the end still bound to the scratch
-		 * page.  There are a number of places where the hardware
-		 * apparently prefetches past the end of the object, and we've
-		 * seen multiple hangs with the GPU head pointer stuck in a
-		 * batchbuffer bound at the last page of the aperture.  One page
-		 * should be enough to keep any prefetching inside of the
-		 * aperture.
+int
+i915_gem_init_hw(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+		return -EIO;
+
+	if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
+		I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+
+	i915_gem_l3_remap(dev);
+
+	i915_gem_init_swizzling(dev);
+
+	ret = i915_gem_init_rings(dev);
+	if (ret)
+		return ret;
+
+	/*
+	 * XXX: There was some w/a described somewhere suggesting loading
+	 * contexts before PPGTT.
+	 */
+	i915_gem_context_init(dev);
+	i915_gem_init_ppgtt(dev);
+
+	return 0;
+}
+
+#define LFB_SIZE 0xC00000
+
+int i915_gem_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
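The deleted intel_enable_ppgtt() is a compact example of the tri-state module-parameter pattern that this decision now inherits elsewhere; a sketch using the names from the removed code (the -1/0/1 semantics are the assumed convention):

	/* -1 means "auto": only an explicit 0 or 1 from the user
	 * overrides the driver's heuristic, such as disabling PPGTT
	 * on gen6 when VT-d is active. */
	if (i915_enable_ppgtt >= 0)	/* user forced on (1) or off (0) */
		return i915_enable_ppgtt;
	return !(INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped);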
@@ -3789 +3947 @@ (i915_gem_entervt_ioctl: wedged state now lives in gpu_error.reset_counter)
 	int ret;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
 
-	if (atomic_read(&dev_priv->mm.wedged)) {
+	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
 		DRM_ERROR("Reenabling wedged hardware, good luck\n");
-		atomic_set(&dev_priv->mm.wedged, 0);
+		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
 	}
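The old mm.wedged flag is replaced by a richer counter. The assumed shape of the new predicate, based on the upstream i915_gpu_error rework (not part of this hunk): the low bits of reset_counter encode "reset in progress" and "terminally wedged", while the remaining bits count completed resets, so one atomic_read answers everything the old flag did.

	static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
	{
		return unlikely(atomic_read(&error->reset_counter)
				& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
	}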
@@ -3856 +4014 @@ (i915_gem_load: declarations tidied, reset wait queue initialised)
 }
 
 void
 i915_gem_load(struct drm_device *dev)
 {
-	int i;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
@@ -3870 +4028 @@
 		init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
+	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
 	if (IS_GEN3(dev)) {
 		I915_WRITE(MI_ARB_STATE,