--- Rev 4539
+++ Rev 4560
@@ -31 +31 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 //#include
 
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static unsigned long
 copy_to_user(void __user *to, const void *from, unsigned long n)
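
The two defines added at the top of the file claim the uppermost bits of the exec entry's flags word for kernel-internal state. A minimal standalone sketch (hypothetical values, not driver code) of how such tracking bits coexist with user-visible flags and are cleared together on unreserve:

    #include <stdint.h>
    #include <stdio.h>

    #define HAS_PIN   (1u << 31) /* mirrors __EXEC_OBJECT_HAS_PIN */
    #define HAS_FENCE (1u << 30) /* mirrors __EXEC_OBJECT_HAS_FENCE */

    int main(void)
    {
        uint32_t flags = 0x2;            /* hypothetical user-supplied flag bits */

        flags |= HAS_PIN | HAS_FENCE;    /* set while the object is reserved */
        flags &= ~(HAS_PIN | HAS_FENCE); /* cleared together on unreserve */
        printf("user bits survive: %#x\n", flags); /* prints 0x2 */
        return 0;
    }
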
@@ -46 +48 @@
 {
 	memcpy(to, from, n);
 	return 0;
 }
 
-struct eb_objects {
-	struct list_head objects;
+struct eb_vmas {
+	struct list_head vmas;
 	int and;
 	union {
-		struct drm_i915_gem_object *lut[0];
+		struct i915_vma *lut[0];
 		struct hlist_head buckets[0];
 	};
 };
 
-static struct eb_objects *
+static struct eb_vmas *
 eb_create(struct drm_i915_gem_execbuffer2 *args)
 {
-	struct eb_objects *eb = NULL;
+	struct eb_vmas *eb = NULL;
 
 	if (args->flags & I915_EXEC_HANDLE_LUT) {
-		int size = args->buffer_count;
-		size *= sizeof(struct drm_i915_gem_object *);
-		size += sizeof(struct eb_objects);
+		unsigned size = args->buffer_count;
+		size *= sizeof(struct i915_vma *);
+		size += sizeof(struct eb_vmas);
 		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	}
 
 	if (eb == NULL) {
-		int size = args->buffer_count;
-		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+		unsigned size = args->buffer_count;
+		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
 		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
 		while (count > 2*size)
 			count >>= 1;
 		eb = kzalloc(count*sizeof(struct hlist_head) +
-			     sizeof(struct eb_objects),
+			     sizeof(struct eb_vmas),
 			     GFP_TEMPORARY);
 		if (eb == NULL)
 			return eb;
 
 		eb->and = count - 1;
 	} else
 		eb->and = -args->buffer_count;
 
-	INIT_LIST_HEAD(&eb->objects);
+	INIT_LIST_HEAD(&eb->vmas);
 	return eb;
 }
 
 static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
 {
 	if (eb->and >= 0)
 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
 static int
-eb_lookup_objects(struct eb_objects *eb,
-		  struct drm_i915_gem_exec_object2 *exec,
-		  const struct drm_i915_gem_execbuffer2 *args,
-		  struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+	       struct drm_i915_gem_exec_object2 *exec,
+	       const struct drm_i915_gem_execbuffer2 *args,
+	       struct i915_address_space *vm,
+	       struct drm_file *file)
 {
-	int i;
+	struct drm_i915_gem_object *obj;
+	struct list_head objects;
+	int i, ret;
 
+	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
+	/* Grab a reference to the object and release the lock so we can lookup
+	 * or create the VMA without using GFP_ATOMIC */
 	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj;
-
 		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
 		if (obj == NULL) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
-			return -ENOENT;
+			ret = -ENOENT;
+			goto err;
 		}
 
-		if (!list_empty(&obj->exec_list)) {
+		if (!list_empty(&obj->obj_exec_link)) {
 			spin_unlock(&file->table_lock);
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto err;
 		}
 
 		drm_gem_object_reference(&obj->base);
-		list_add_tail(&obj->exec_list, &eb->objects);
+		list_add_tail(&obj->obj_exec_link, &objects);
+	}
+	spin_unlock(&file->table_lock);
+
+	i = 0;
+	while (!list_empty(&objects)) {
+		struct i915_vma *vma;
+
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
 
-		obj->exec_entry = &exec[i];
+		/*
+		 * NOTE: We can leak any vmas created here when something fails
+		 * later on. But that's no issue since vma_unbind can deal with
+		 * vmas which are not actually bound. And since only
+		 * lookup_or_create exists as an interface to get at the vma
+		 * from the (obj, vm) we don't run the risk of creating
+		 * duplicated vmas for the same vm.
+		 */
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+		if (IS_ERR(vma)) {
+			DRM_DEBUG("Failed to lookup VMA\n");
+			ret = PTR_ERR(vma);
+			goto err;
+		}
+
+		/* Transfer ownership from the objects list to the vmas list. */
+		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_del_init(&obj->obj_exec_link);
+
+		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
-			eb->lut[i] = obj;
+			eb->lut[i] = vma;
 		} else {
 			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-			obj->exec_handle = handle;
-			hlist_add_head(&obj->exec_node,
+			vma->exec_handle = handle;
+			hlist_add_head(&vma->exec_node,
 				       &eb->buckets[handle & eb->and]);
 		}
+		++i;
+	}
+
+	return 0;
+
+
+err:
+	while (!list_empty(&objects)) {
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+		list_del_init(&obj->obj_exec_link);
+		drm_gem_object_unreference(&obj->base);
 	}
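
The bucket sizing in eb_create() above picks the largest power-of-two bucket count no greater than twice the buffer count, starting from half a page's worth of hash heads. A standalone sketch of that computation (assumed PAGE_SIZE and a stand-in for struct hlist_head; not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned page_size = 4096;           /* assumed PAGE_SIZE */
        unsigned head_size = sizeof(void *); /* stand-in for struct hlist_head */
        unsigned size = 5;                   /* hypothetical buffer_count */
        unsigned count = page_size / head_size / 2;

        while (count > 2 * size)             /* halve until <= 2x buffer count */
            count >>= 1;
        printf("%u buckets for %u buffers\n", count, size); /* 8 for 5 */
        return 0;
    }
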
@@ -152 +204 @@
 		struct hlist_head *head;
 		struct hlist_node *node;
 
 		head = &eb->buckets[handle & eb->and];
 		hlist_for_each(node, head) {
-			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma;
 
-			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-			if (obj->exec_handle == handle)
-				return obj;
+			vma = hlist_entry(node, struct i915_vma, exec_node);
+			if (vma->exec_handle == handle)
+				return vma;
 		}
 		return NULL;
 	}
 }
 
 static void
-eb_destroy(struct eb_objects *eb)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
 {
-	while (!list_empty(&eb->objects)) {
-		struct drm_i915_gem_object *obj;
+	while (!list_empty(&eb->vmas)) {
+		struct i915_vma *vma;
 
-		obj = list_first_entry(&eb->objects,
-				       struct drm_i915_gem_object,
+		vma = list_first_entry(&eb->vmas,
+				       struct i915_vma,
 				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
+		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
+		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
 	return (HAS_LLC(obj->base.dev) ||
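
The lookup above is one of the two modes eb_create() sets up via eb->and: a negative value means handles are dense indices (I915_EXEC_HANDLE_LUT) indexing the lut array directly, otherwise the handle is masked into a power-of-two hash bucket. A sketch of that dispatch (hypothetical values, not driver code):

    #include <stdio.h>

    #define N 8 /* assumed power-of-two bucket count */

    int main(void)
    {
        int and = N - 1;        /* hash mode; -buffer_count would select LUT mode */
        unsigned handle = 1234; /* hypothetical object handle */

        if (and < 0)
            printf("LUT mode: lut[%u]\n", handle);          /* handle is the index */
        else
            printf("hash mode: bucket %u\n", handle & and); /* 1234 & 7 == 2 */
        return 0;
    }
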
@@ -192 +265 @@
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t page_offset = offset_in_page(reloc->offset);
 	char *vaddr;
-	int ret = -EINVAL;
+	int ret;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 	if (ret)
 		return ret;
 
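
The CPU relocation path above splits reloc->offset into a page index and an offset within that page (offset_in_page) before patching the mapped page. A standalone sketch of that split (assumed 4 KiB pages, hypothetical offset; not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u /* assumed */

    int main(void)
    {
        uint64_t reloc_offset = 0x3214; /* hypothetical relocation offset */
        unsigned page   = (unsigned)(reloc_offset / PAGE_SIZE);      /* which page */
        unsigned within = (unsigned)(reloc_offset & (PAGE_SIZE - 1)); /* offset_in_page */

        printf("page %u, offset_in_page %u\n", page, within); /* page 3, offset 532 */
        return 0;
    }
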
@@ -213 +286 @@
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t __iomem *reloc_entry;
 	void __iomem *reloc_page;
-	int ret = -EINVAL;
+	int ret;
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
@@ -237 +310 @@
 	return 0;
 }
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-				   struct eb_objects *eb,
+				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct i915_address_space *vm)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
+	struct i915_vma *target_vma;
 	uint32_t target_offset;
-	int ret = -EINVAL;
+	int ret;
 
 	/* we've already hold a reference to all valid objects */
-	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
-	if (unlikely(target_obj == NULL))
+	target_vma = eb_get_vma(eb, reloc->target_handle);
+	if (unlikely(target_vma == NULL))
 		return -ENOENT;
+	target_i915_obj = target_vma->obj;
+	target_obj = &target_vma->obj->base;
 
-	target_i915_obj = to_intel_bo(target_obj);
-	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
+	target_offset = target_vma->node.start;
 
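
What target_offset feeds into (simplified, and not shown in the hunks here) is the relocation write itself: a 32-bit slot inside the batch object is patched with the target's address plus a user-supplied delta, which is exactly the write the presumed_offset check below short-circuits. A hedged sketch with hypothetical values, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t batch[16] = {0};
        uint64_t target_offset = 0x10000; /* hypothetical GPU address of target */
        uint32_t delta = 0x40;            /* hypothetical reloc->delta */
        uint64_t offset = 8;              /* hypothetical reloc->offset, 4-byte aligned */

        batch[offset / 4] = (uint32_t)(target_offset + delta); /* the patched slot */
        printf("wrote %#x at byte offset %llu\n",
               batch[offset / 4], (unsigned long long)offset);
        return 0;
    }
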
@@ -274 +349 @@
 			  "read %08x write %08x",
 			  obj, reloc->target_handle,
 			  (int) reloc->offset,
 			  reloc->read_domains,
 			  reloc->write_domain);
-		return ret;
+		return -EINVAL;
 	}
 	if (unlikely((reloc->write_domain | reloc->read_domains)
 		     & ~I915_GEM_GPU_DOMAINS)) {
 		DRM_DEBUG("reloc with read/write non-GPU domains: "
 			  "obj %p target %d offset %d "
 			  "read %08x write %08x",
 			  obj, reloc->target_handle,
 			  (int) reloc->offset,
 			  reloc->read_domains,
 			  reloc->write_domain);
-		return ret;
+		return -EINVAL;
 	}
 
 	target_obj->pending_read_domains |= reloc->read_domains;
@@ -298 +373 @@
 	 */
 	if (target_offset == reloc->presumed_offset)
 		return 0;
 
 	/* Check that the relocation address is valid... */
-	if (unlikely(reloc->offset > obj->base.size - 4)) {
+	if (unlikely(reloc->offset >
+		     obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
 		DRM_DEBUG("Relocation beyond object bounds: "
 			  "obj %p target %d offset %d size %d.\n",
 			  obj, reloc->target_handle,
 			  (int) reloc->offset,
 			  (int) obj->base.size);
-		return ret;
+		return -EINVAL;
 	}
 	if (unlikely(reloc->offset & 3)) {
 		DRM_DEBUG("Relocation not 4-byte aligned: "
 			  "obj %p target %d offset %d.\n",
 			  obj, reloc->target_handle,
 			  (int) reloc->offset);
-		return ret;
+		return -EINVAL;
 	}
 
@@ -332 +408 @@
 
 	return 0;
 }
 
 static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct eb_objects *eb,
-				    struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+				 struct eb_vmas *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
 	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
 	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int remain, ret;
@@ -357 +432 @@
 		memcpy(r, user_relocs, count*sizeof(r[0]));
 
 		do {
 			u64 offset = r->presumed_offset;
 
-			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
-								 vm);
+			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+								 vma->vm);
 			if (ret)
 				return ret;
@@ -379 +454 @@
 	return 0;
 #undef N_RELOC
 }
 
 static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-					 struct eb_objects *eb,
-					 struct drm_i915_gem_relocation_entry *relocs,
-					 struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+				      struct eb_vmas *eb,
+				      struct drm_i915_gem_relocation_entry *relocs)
 {
-	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	int i, ret;
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
-							 vm);
+		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+							 vma->vm);
 		if (ret)
 			return ret;
 	}
 
 	return 0;
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
-			     struct i915_address_space *vm)
+i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	/* This is the fast path and we cannot handle a pagefault whilst
 	 * holding the struct mutex lest the user pass in the relocations
 	 * contained within a mmaped bo. For in such a case we, the page
 	 * fault handler would call i915_gem_fault() and we would try to
 	 * acquire the struct mutex again. Obviously this is bad and so
 	 * lockdep complains vehemently.
 	 */
 //	pagefault_disable();
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
 		if (ret)
 			break;
 	}
 //	pagefault_enable();
 
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
 {
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-	return entry->relocation_count && !use_cpu_reloc(obj);
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+		i915_is_ggtt(vma->vm);
 }
 
 static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 				   struct intel_ring_buffer *ring,
-				   struct i915_address_space *vm,
 				   bool *need_reloc)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
+	struct drm_i915_gem_object *obj = vma->obj;
 	int ret;
 
 	need_fence =
@@ -478 +549 @@
 						obj, obj->cache_level);
 
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
-	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
-		entry->offset = i915_gem_obj_offset(obj, vm);
+	if (entry->offset != vma->node.start) {
+		entry->offset = vma->node.start;
 		*need_reloc = true;
 	}
@@ -495 +566 @@
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 
 	return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_gem_exec_object2 *entry;
-
-	if (!i915_gem_obj_bound_any(obj))
-		return;
-
-	entry = obj->exec_entry;
-
-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
-
-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		i915_gem_object_unpin(obj);
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-			    struct list_head *objects,
-			    struct i915_address_space *vm,
+			    struct list_head *vmas,
 			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
-	struct list_head ordered_objects;
+	struct i915_vma *vma;
+	struct i915_address_space *vm;
+	struct list_head ordered_vmas;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
-	INIT_LIST_HEAD(&ordered_objects);
-	while (!list_empty(objects)) {
+	if (list_empty(vmas))
+		return 0;
+
+	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+	INIT_LIST_HEAD(&ordered_vmas);
+	while (!list_empty(vmas)) {
 		struct drm_i915_gem_exec_object2 *entry;
 		bool need_fence, need_mappable;
 
-		obj = list_first_entry(objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		entry = obj->exec_entry;
+		vma = list_first_entry(vmas, struct i915_vma, exec_list);
+		obj = vma->obj;
+		entry = vma->exec_entry;
 
 		need_fence =
 			has_fenced_gpu_access &&
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 			obj->tiling_mode != I915_TILING_NONE;
-		need_mappable = need_fence || need_reloc_mappable(obj);
+		need_mappable = need_fence || need_reloc_mappable(vma);
 
 		if (need_mappable)
-			list_move(&obj->exec_list, &ordered_objects);
+			list_move(&vma->exec_list, &ordered_vmas);
 		else
-			list_move_tail(&obj->exec_list, &ordered_objects);
+			list_move_tail(&vma->exec_list, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
@@ -569 +626 @@
 	retry = 0;
 	do {
 		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+		list_for_each_entry(vma, vmas, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 			bool need_fence, need_mappable;
-			u32 obj_offset;
 
-			if (!i915_gem_obj_bound(obj, vm))
+			obj = vma->obj;
+
+			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			obj_offset = i915_gem_obj_offset(obj, vm);
 			need_fence =
 				has_fenced_gpu_access &&
 				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(obj);
+			need_mappable = need_fence || need_reloc_mappable(vma);
 
 			WARN_ON((need_mappable || need_fence) &&
-				!i915_is_ggtt(vm));
+				!i915_is_ggtt(vma->vm));
 
 			if ((entry->alignment &&
-			     obj_offset & (entry->alignment - 1)) ||
+			     vma->node.start & (entry->alignment - 1)) ||
 			    (need_mappable && !obj->map_and_fenceable))
-				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
+				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
 		/* Bind fresh objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			if (i915_gem_obj_bound(obj, vm))
+		list_for_each_entry(vma, vmas, exec_list) {
+			if (drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
 
-err:		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list)
-			i915_gem_execbuffer_unreserve_object(obj);
-
+err:
 		if (ret != -ENOSPC || retry++)
 			return ret;
 
-//		ret = i915_gem_evict_everything(ring->dev);
+		/* Decrement pin count for bound objects */
+		list_for_each_entry(vma, vmas, exec_list)
+			i915_gem_execbuffer_unreserve_vma(vma);
+
+//		ret = i915_gem_evict_vm(vm, true);
 		if (ret)
 			return ret;
 	} while (1);
 }
 
 static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
 				  struct intel_ring_buffer *ring,
-				  struct eb_objects *eb,
-				  struct drm_i915_gem_exec_object2 *exec,
-				  struct i915_address_space *vm)
+				  struct eb_vmas *eb,
+				  struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
-	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+	struct i915_vma *vma;
 	bool need_relocs;
 	int *reloc_offset;
 	int i, total, ret;
-	int count = args->buffer_count;
+	unsigned count = args->buffer_count;
+
+	if (WARN_ON(list_empty(&eb->vmas)))
+		return 0;
@@ -706 +768 @@
 		goto err;
 	}
 
 	/* reacquire the objects */
 	eb_reset(eb);
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
-	list_for_each_entry(obj, &eb->objects, exec_list) {
-		int offset = obj->exec_entry - exec;
-		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-							       reloc + reloc_offset[offset],
-							       vm);
+	list_for_each_entry(vma, &eb->vmas, exec_list) {
+		int offset = vma->exec_entry - exec;
+		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+							    reloc + reloc_offset[offset]);
 		if (ret)
@@ -738 +799 @@
 	return ret;
 }
 
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
-				struct list_head *objects)
+				struct list_head *vmas)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
 	int ret;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
 		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
@@ -782 +844 @@
 static int
 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 		   int count)
 {
 	int i;
-	int relocs_total = 0;
-	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+	unsigned relocs_total = 0;
+	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
 	for (i = 0; i < count; i++) {
 		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@@ -807 +869 @@
 		/*
 		 * We must check that the entire relocation array is safe
 		 * to read, but since we may need to update the presumed
 		 * offsets during execution, check for full write access.
 		 */
-
 	}
 
 	return 0;
 }
 
+static int
+i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
+			  const u32 ctx_id)
+{
+	struct i915_ctx_hang_stats *hs;
+
+	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+	if (IS_ERR(hs))
+		return PTR_ERR(hs);
+
+	if (hs->banned) {
+		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
+		return -EIO;
+	}
+
+	return 0;
+}
+
 static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
-				   struct i915_address_space *vm,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct intel_ring_buffer *ring)
 {
-	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(vma, vmas, exec_list) {
+		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
 		obj->base.write_domain = obj->base.pending_write_domain;
 		if (obj->base.write_domain == 0)
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		/* FIXME: This lookup gets fixed later <-- danvet */
-		list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
@@ -890 +967 @@
 		       struct drm_i915_gem_execbuffer2 *args,
 		       struct drm_i915_gem_exec_object2 *exec,
 		       struct i915_address_space *vm)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct eb_objects *eb;
+	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_clip_rect *cliprects = NULL;
 	struct intel_ring_buffer *ring;
-	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
+	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
 	u32 mask, flags;
 	int ret, mode, i;
 	bool need_relocs;
@@ -1003 +1080 @@
 		DRM_DEBUG("execbuf with %u cliprects\n",
 			  args->num_cliprects);
 		return -EINVAL;
 	}
 
-	cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+	cliprects = kcalloc(args->num_cliprects,
+			    sizeof(*cliprects),
 			    GFP_KERNEL);
 	if (cliprects == NULL) {
 		ret = -ENOMEM;
 		goto pre_mutex_err;
@@ -1018 +1096 @@
 			ret = -EFAULT;
 			goto pre_mutex_err;
 		}
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto pre_mutex_err;
 
 	if (dev_priv->ums.mm_suspended) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EBUSY;
 		goto pre_mutex_err;
 	}
 
+	ret = i915_gem_validate_context(dev, file, ctx_id);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		goto pre_mutex_err;
+	}
+
 	eb = eb_create(args);
 	if (eb == NULL) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = -ENOMEM;
 		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
-	ret = eb_lookup_objects(eb, exec, args, file);
+	ret = eb_lookup_vmas(eb, exec, args, vm, file);
 	if (ret)
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->objects.prev,
-			       struct drm_i915_gem_object,
-			       exec_list);
+	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
 	if (need_relocs)
-		ret = i915_gem_execbuffer_relocate(eb, vm);
+		ret = i915_gem_execbuffer_relocate(eb);
 	if (ret) {
 		if (ret == -EFAULT) {
@@ -1074 +1158 @@
 	}
 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
-	 * hsw should have this fixed, but let's be paranoid and do it
-	 * unconditionally for now. */
+	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
 	if (ret)
 		goto err;
@@ -1134 +1217 @@
 		goto err;
 	}
 
 	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:
 	eb_destroy(eb);
 
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
 	kfree(cliprects);
+
+	/* intel_gpu_busy should also get a ref, so it will free when the device
+	 * is really idle. */