/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Chris Wilson
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include <linux/dma_remapping.h>

#define I915_EXEC_SECURE    (1<<9)
#define I915_EXEC_IS_PINNED (1<<10)
#define I915_EXEC_VEBOX     (4<<0)

struct drm_i915_gem_object *get_fb_obj();


static unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;
}

static unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;
}

struct eb_objects {
    struct list_head objects;
    int and;
    union {
        struct drm_i915_gem_object *lut[0];
        struct hlist_head buckets[0];
    };
};

static struct eb_objects *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
    struct eb_objects *eb = NULL;

    if (args->flags & I915_EXEC_HANDLE_LUT) {
        int size = args->buffer_count;
        size *= sizeof(struct drm_i915_gem_object *);
        size += sizeof(struct eb_objects);
        eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
    }

    if (eb == NULL) {
        int size = args->buffer_count;
        int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
        BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
        while (count > 2*size)
            count >>= 1;
        eb = kzalloc(count*sizeof(struct hlist_head) +
                     sizeof(struct eb_objects),
                     GFP_TEMPORARY);
        if (eb == NULL)
            return eb;

        eb->and = count - 1;
    } else
        eb->and = -args->buffer_count;

    INIT_LIST_HEAD(&eb->objects);
    return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
    if (eb->and >= 0)
        memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_objects(struct eb_objects *eb,
                  struct drm_i915_gem_exec_object2 *exec,
                  const struct drm_i915_gem_execbuffer2 *args,
                  struct drm_file *file)
{
    int i;

    spin_lock(&file->table_lock);
    for (i = 0; i < args->buffer_count; i++) {
        struct drm_i915_gem_object *obj;

        if (exec[i].handle == -2)
            obj = get_fb_obj();
        else
            obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
        if (obj == NULL) {
            spin_unlock(&file->table_lock);
            DRM_DEBUG("Invalid object handle %d at index %d\n",
                      exec[i].handle, i);
            return -ENOENT;
        }

        if (!list_empty(&obj->exec_list)) {
            spin_unlock(&file->table_lock);
            DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                      obj, exec[i].handle, i);
            return -EINVAL;
        }

        drm_gem_object_reference(&obj->base);
        list_add_tail(&obj->exec_list, &eb->objects);

        obj->exec_entry = &exec[i];

        if (exec[i].handle == -2)
            continue;

        if (eb->and < 0) {
            eb->lut[i] = obj;
        } else {
            uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
            obj->exec_handle = handle;
            hlist_add_head(&obj->exec_node,
                           &eb->buckets[handle & eb->and]);
        }
    }
    spin_unlock(&file->table_lock);

    return 0;
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{

    if (handle == -2)
        return get_fb_obj();

    if (eb->and < 0) {
        if (handle >= -eb->and)
            return NULL;
        return eb->lut[handle];
    } else {
        struct hlist_head *head;
        struct hlist_node *node;

        head = &eb->buckets[handle & eb->and];
        hlist_for_each(node, head) {
            struct drm_i915_gem_object *obj;

            obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
            if (obj->exec_handle == handle)
                return obj;
        }
        return NULL;
    }
}

static void
eb_destroy(struct eb_objects *eb)
{
    while (!list_empty(&eb->objects)) {
        struct drm_i915_gem_object *obj;

        obj = list_first_entry(&eb->objects,
                               struct drm_i915_gem_object,
                               exec_list);
        list_del_init(&obj->exec_list);
        drm_gem_object_unreference(&obj->base);
    }
    kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
    return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
            !obj->map_and_fenceable ||
            obj->cache_level != I915_CACHE_NONE);
}

static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc)
{
    uint32_t page_offset = offset_in_page(reloc->offset);
    char *vaddr;
    int ret = -EINVAL;

    ret = i915_gem_object_set_to_cpu_domain(obj, 1);
    if (ret)
        return ret;

    vaddr = (char *)MapIoMem((addr_t)i915_gem_object_get_page(obj,
                             reloc->offset >> PAGE_SHIFT), 4096, 3);
    *(uint32_t *)(vaddr + page_offset) = reloc->delta;
    FreeKernelSpace(vaddr);

    return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc)
{
    struct drm_device *dev = obj->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t __iomem *reloc_entry;
    void __iomem *reloc_page;
    int ret = -EINVAL;

    ret = i915_gem_object_set_to_gtt_domain(obj, true);
    if (ret)
        return ret;

    ret = i915_gem_object_put_fence(obj);
    if (ret)
        return ret;

    /* Map the page containing the relocation we're going to perform. */
    reloc->offset += i915_gem_obj_ggtt_offset(obj);
    reloc_page = (void*)MapIoMem(dev_priv->gtt.mappable_base +
                                 (reloc->offset & PAGE_MASK), 4096, 0x18|3);
    reloc_entry = (uint32_t __iomem *)
        (reloc_page + offset_in_page(reloc->offset));
    iowrite32(reloc->delta, reloc_entry);
    FreeKernelSpace(reloc_page);

    return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_objects *eb,
                                   struct drm_i915_gem_relocation_entry *reloc,
                                   struct i915_address_space *vm)
{
    struct drm_device *dev = obj->base.dev;
    struct drm_gem_object *target_obj;
    struct drm_i915_gem_object *target_i915_obj;
    uint32_t target_offset;
    int ret = -EINVAL;

    /* we've already hold a reference to all valid objects */
    target_obj = &eb_get_object(eb, reloc->target_handle)->base;
    if (unlikely(target_obj == NULL))
        return -ENOENT;

    target_i915_obj = to_intel_bo(target_obj);
    target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

    /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
     * pipe_control writes because the gpu doesn't properly redirect them
     * through the ppgtt for non_secure batchbuffers. */
    if (unlikely(IS_GEN6(dev) &&
        reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
        !target_i915_obj->has_global_gtt_mapping)) {
        i915_gem_gtt_bind_object(target_i915_obj,
                                 target_i915_obj->cache_level);
    }

    /* Validate that the target is in a valid r/w GPU domain */
    if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
        DRM_DEBUG("reloc with multiple write domains: "
                  "obj %p target %d offset %d "
                  "read %08x write %08x",
                  obj, reloc->target_handle,
                  (int) reloc->offset,
                  reloc->read_domains,
                  reloc->write_domain);
        return ret;
    }
    if (unlikely((reloc->write_domain | reloc->read_domains)
                 & ~I915_GEM_GPU_DOMAINS)) {
        DRM_DEBUG("reloc with read/write non-GPU domains: "
                  "obj %p target %d offset %d "
                  "read %08x write %08x",
                  obj, reloc->target_handle,
                  (int) reloc->offset,
                  reloc->read_domains,
                  reloc->write_domain);
        return ret;
    }

    target_obj->pending_read_domains |= reloc->read_domains;
    target_obj->pending_write_domain |= reloc->write_domain;

    /* If the relocation already has the right value in it, no
     * more work needs to be done.
     */
    if (target_offset == reloc->presumed_offset)
        return 0;

    /* Check that the relocation address is valid... */
    if (unlikely(reloc->offset > obj->base.size - 4)) {
        DRM_DEBUG("Relocation beyond object bounds: "
                  "obj %p target %d offset %d size %d.\n",
                  obj, reloc->target_handle,
                  (int) reloc->offset,
                  (int) obj->base.size);
        return ret;
    }
    if (unlikely(reloc->offset & 3)) {
        DRM_DEBUG("Relocation not 4-byte aligned: "
                  "obj %p target %d offset %d.\n",
                  obj, reloc->target_handle,
                  (int) reloc->offset);
        return ret;
    }

    /* We can't wait for rendering with pagefaults disabled */

    reloc->delta += target_offset;
    if (use_cpu_reloc(obj))
        ret = relocate_entry_cpu(obj, reloc);
    else
        ret = relocate_entry_gtt(obj, reloc);

    if (ret)
        return ret;

    /* and update the user's relocation entry */
    reloc->presumed_offset = target_offset;

    return 0;
}

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                                    struct eb_objects *eb,
                                    struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
    struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
    struct drm_i915_gem_relocation_entry __user *user_relocs;
    struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    int remain, ret;

    user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

    remain = entry->relocation_count;
    while (remain) {
        struct drm_i915_gem_relocation_entry *r = stack_reloc;
        int count = remain;
        if (count > ARRAY_SIZE(stack_reloc))
            count = ARRAY_SIZE(stack_reloc);
        remain -= count;

        memcpy(r, user_relocs, count*sizeof(r[0]));

        do {
            u64 offset = r->presumed_offset;

            ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
                                                     vm);
            if (ret)
                return ret;

            memcpy(&user_relocs->presumed_offset,
                   &r->presumed_offset,
                   sizeof(r->presumed_offset));

            user_relocs++;
            r++;
        } while (--count);
    }

    return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
                                         struct eb_objects *eb,
                                         struct drm_i915_gem_relocation_entry *relocs,
                                         struct i915_address_space *vm)
{
    const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    int i, ret;

    for (i = 0; i < entry->relocation_count; i++) {
        ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
                                                 vm);
        if (ret)
            return ret;
    }

    return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_objects *eb,
                             struct i915_address_space *vm)
{
    struct drm_i915_gem_object *obj;
    int ret = 0;

    /* This is the fast path and we cannot handle a pagefault whilst
     * holding the struct mutex lest the user pass in the relocations
     * contained within a mmaped bo. For in such a case we, the page
     * fault handler would call i915_gem_fault() and we would try to
     * acquire the struct mutex again. Obviously this is bad and so
     * lockdep complains vehemently.
     */
//  pagefault_disable();
    list_for_each_entry(obj, &eb->objects, exec_list) {
        ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
        if (ret)
            break;
    }
//  pagefault_enable();

    return ret;
}

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
    struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    return entry->relocation_count && !use_cpu_reloc(obj);
}

static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                                   struct intel_ring_buffer *ring,
                                   struct i915_address_space *vm,
                                   bool *need_reloc)
{
    struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
    struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
    bool need_fence, need_mappable;
    int ret;

    need_fence =
        has_fenced_gpu_access &&
        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
        obj->tiling_mode != I915_TILING_NONE;
    need_mappable = need_fence || need_reloc_mappable(obj);

    ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
                              false);
    if (ret)
        return ret;

    entry->flags |= __EXEC_OBJECT_HAS_PIN;

    if (has_fenced_gpu_access) {
        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
            ret = i915_gem_object_get_fence(obj);
            if (ret)
                return ret;

            if (i915_gem_object_pin_fence(obj))
                entry->flags |= __EXEC_OBJECT_HAS_FENCE;

            obj->pending_fenced_gpu_access = true;
        }
    }

    /* Ensure ppgtt mapping exists if needed */
    if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                               obj, obj->cache_level);

        obj->has_aliasing_ppgtt_mapping = 1;
    }

    if (entry->offset != i915_gem_obj_offset(obj, vm)) {
        entry->offset = i915_gem_obj_offset(obj, vm);
        *need_reloc = true;
    }

    if (entry->flags & EXEC_OBJECT_WRITE) {
        obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
        obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
    }

    if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
        !obj->has_global_gtt_mapping)
        i915_gem_gtt_bind_object(obj, obj->cache_level);

    return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
    struct drm_i915_gem_exec_object2 *entry;

    if (!i915_gem_obj_bound_any(obj))
        return;

    entry = obj->exec_entry;

    if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
        i915_gem_object_unpin_fence(obj);

    if (entry->flags & __EXEC_OBJECT_HAS_PIN)
        i915_gem_object_unpin(obj);

    entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct list_head *objects,
                            struct i915_address_space *vm,
                            bool *need_relocs)
{
    struct drm_i915_gem_object *obj;
    struct list_head ordered_objects;
    bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
    int retry;

    INIT_LIST_HEAD(&ordered_objects);
    while (!list_empty(objects)) {
        struct drm_i915_gem_exec_object2 *entry;
        bool need_fence, need_mappable;

        obj = list_first_entry(objects,
                               struct drm_i915_gem_object,
                               exec_list);
        entry = obj->exec_entry;

        need_fence =
            has_fenced_gpu_access &&
            entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
            obj->tiling_mode != I915_TILING_NONE;
        need_mappable = need_fence || need_reloc_mappable(obj);

        if (need_mappable)
            list_move(&obj->exec_list, &ordered_objects);
        else
            list_move_tail(&obj->exec_list, &ordered_objects);

        obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
        obj->base.pending_write_domain = 0;
        obj->pending_fenced_gpu_access = false;
    }
    list_splice(&ordered_objects, objects);

    /* Attempt to pin all of the buffers into the GTT.
     * This is done in 3 phases:
     *
     * 1a. Unbind all objects that do not match the GTT constraints for
     *     the execbuffer (fenceable, mappable, alignment etc).
     * 1b. Increment pin count for already bound objects.
     * 2.  Bind new objects.
     * 3.  Decrement pin count.
     *
     * This avoid unnecessary unbinding of later objects in order to make
     * room for the earlier objects *unless* we need to defragment.
     */
    retry = 0;
    do {
        int ret = 0;

        /* Unbind any ill-fitting objects or pin. */
        list_for_each_entry(obj, objects, exec_list) {
            struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
            bool need_fence, need_mappable;
            u32 obj_offset;

            if (!i915_gem_obj_bound(obj, vm))
                continue;

            obj_offset = i915_gem_obj_offset(obj, vm);
            need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
            need_mappable = need_fence || need_reloc_mappable(obj);

            WARN_ON((need_mappable || need_fence) &&
                    !i915_is_ggtt(vm));

            if ((entry->alignment &&
                 obj_offset & (entry->alignment - 1)) ||
                (need_mappable && !obj->map_and_fenceable))
                ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
            else
                ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
            if (ret)
                goto err;
        }

        /* Bind fresh objects */
        list_for_each_entry(obj, objects, exec_list) {
            if (i915_gem_obj_bound(obj, vm))
                continue;

            ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
            if (ret)
                goto err;
        }

err:        /* Decrement pin count for bound objects */
        list_for_each_entry(obj, objects, exec_list)
            i915_gem_execbuffer_unreserve_object(obj);

        if (ret != -ENOSPC || retry++)
            return ret;

//      ret = i915_gem_evict_everything(ring->dev);
        if (ret)
            return ret;
    } while (1);
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_i915_gem_execbuffer2 *args,
                                  struct drm_file *file,
                                  struct intel_ring_buffer *ring,
                                  struct eb_objects *eb,
                                  struct drm_i915_gem_exec_object2 *exec,
                                  struct i915_address_space *vm)
{
    struct drm_i915_gem_relocation_entry *reloc;
    struct drm_i915_gem_object *obj;
    bool need_relocs;
    int *reloc_offset;
    int i, total, ret;
    int count = args->buffer_count;

    /* We may process another execbuffer during the unlock... */
    while (!list_empty(&eb->objects)) {
        obj = list_first_entry(&eb->objects,
                               struct drm_i915_gem_object,
                               exec_list);
        list_del_init(&obj->exec_list);
        drm_gem_object_unreference(&obj->base);
    }

    mutex_unlock(&dev->struct_mutex);

    total = 0;
    for (i = 0; i < count; i++)
        total += exec[i].relocation_count;

    reloc_offset = malloc(count * sizeof(*reloc_offset));
    reloc = malloc(total * sizeof(*reloc));
    if (reloc == NULL || reloc_offset == NULL) {
        kfree(reloc);
        kfree(reloc_offset);
        mutex_lock(&dev->struct_mutex);
        return -ENOMEM;
    }

    total = 0;
    for (i = 0; i < count; i++) {
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        u64 invalid_offset = (u64)-1;
        int j;

        user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

        if (copy_from_user(reloc+total, user_relocs,
                           exec[i].relocation_count * sizeof(*reloc))) {
            ret = -EFAULT;
            mutex_lock(&dev->struct_mutex);
            goto err;
        }

        /* As we do not update the known relocation offsets after
         * relocating (due to the complexities in lock handling),
         * we need to mark them as invalid now so that we force the
         * relocation processing next time. Just in case the target
         * object is evicted and then rebound into its old
         * presumed_offset before the next execbuffer - if that
         * happened we would make the mistake of assuming that the
         * relocations were valid.
         */
        for (j = 0; j < exec[i].relocation_count; j++) {
            if (copy_to_user(&user_relocs[j].presumed_offset,
                             &invalid_offset,
                             sizeof(invalid_offset))) {
                ret = -EFAULT;
                mutex_lock(&dev->struct_mutex);
                goto err;
            }
        }

        reloc_offset[i] = total;
        total += exec[i].relocation_count;
    }

    ret = i915_mutex_lock_interruptible(dev);
    if (ret) {
        mutex_lock(&dev->struct_mutex);
        goto err;
    }

    /* reacquire the objects */
    eb_reset(eb);
    ret = eb_lookup_objects(eb, exec, args, file);
    if (ret)
        goto err;

    need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
    ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
    if (ret)
        goto err;

    list_for_each_entry(obj, &eb->objects, exec_list) {
        int offset = obj->exec_entry - exec;
        ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
                                                       reloc + reloc_offset[offset],
                                                       vm);
        if (ret)
            goto err;
    }

    /* Leave the user relocations as are, this is the painfully slow path,
     * and we want to avoid the complication of dropping the lock whilst
     * having buffers reserved in the aperture and so causing spurious
     * ENOSPC for random operations.
     */

err:
    kfree(reloc);
    kfree(reloc_offset);
    return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                                struct list_head *objects)
{
    struct drm_i915_gem_object *obj;
    uint32_t flush_domains = 0;
    bool flush_chipset = false;
    int ret;

    list_for_each_entry(obj, objects, exec_list) {
        ret = i915_gem_object_sync(obj, ring);
        if (ret)
            return ret;

        if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
            flush_chipset |= i915_gem_clflush_object(obj, false);

        flush_domains |= obj->base.write_domain;
    }

    if (flush_chipset)
        i915_gem_chipset_flush(ring->dev);

    if (flush_domains & I915_GEM_DOMAIN_GTT)
        wmb();

    /* Unconditionally invalidate gpu caches and ensure that we do flush
     * any residual writes from the previous batch.
     */
    return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
    if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
        return false;

    return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                   int count)
{
    int i;
    int relocs_total = 0;
    int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

    for (i = 0; i < count; i++) {
        char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
        int length; /* limited by fault_in_pages_readable() */

        if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
            return -EINVAL;

        /* First check for malicious input causing overflow in
         * the worst case where we need to allocate the entire
         * relocation tree as a single array.
         */
        if (exec[i].relocation_count > relocs_max - relocs_total)
            return -EINVAL;
        relocs_total += exec[i].relocation_count;

        length = exec[i].relocation_count *
            sizeof(struct drm_i915_gem_relocation_entry);
        /*
         * We must check that the entire relocation array is safe
         * to read, but since we may need to update the presumed
         * offsets during execution, check for full write access.
         */
//      if (!access_ok(VERIFY_WRITE, ptr, length))
//          return -EFAULT;

//      if (fault_in_multipages_readable(ptr, length))
//          return -EFAULT;
    }

    return 0;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
                                   struct i915_address_space *vm,
                                   struct intel_ring_buffer *ring)
{
    struct drm_i915_gem_object *obj;

    list_for_each_entry(obj, objects, exec_list) {
        u32 old_read = obj->base.read_domains;
        u32 old_write = obj->base.write_domain;

        obj->base.write_domain = obj->base.pending_write_domain;
        if (obj->base.write_domain == 0)
            obj->base.pending_read_domains |= obj->base.read_domains;
        obj->base.read_domains = obj->base.pending_read_domains;
        obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

        /* FIXME: This lookup gets fixed later <-- danvet */
        list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
        i915_gem_object_move_to_active(obj, ring);
        if (obj->base.write_domain) {
            obj->dirty = 1;
            obj->last_write_seqno = intel_ring_get_seqno(ring);
            if (obj->pin_count) /* check for potential scanout */
                intel_mark_fb_busy(obj, ring);
        }

        trace_i915_gem_object_change_domain(obj, old_read, old_write);
    }
}

||
868 | i915_gem_execbuffer_retire_commands(struct drm_device *dev, |
||
869 | struct drm_file *file, |
||
4104 | Serge | 870 | struct intel_ring_buffer *ring, |
871 | struct drm_i915_gem_object *obj) |
||
3263 | Serge | 872 | { |
873 | /* Unconditionally force add_request to emit a full flush. */ |
||
874 | ring->gpu_caches_dirty = true; |
||
875 | |||
876 | /* Add a breadcrumb for the completion of the batch buffer */ |
||
4104 | Serge | 877 | (void)__i915_add_request(ring, file, obj, NULL); |
3263 | Serge | 878 | } |
879 | |||
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret, i;

    if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
        return 0;

    ret = intel_ring_begin(ring, 4 * 3);
    if (ret)
        return ret;

    for (i = 0; i < 4; i++) {
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
        intel_ring_emit(ring, 0);
    }

    intel_ring_advance(ring);

    return 0;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec,
                       struct i915_address_space *vm)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct eb_objects *eb;
    struct drm_i915_gem_object *batch_obj;
    struct drm_clip_rect *cliprects = NULL;
    struct intel_ring_buffer *ring;
    u32 ctx_id = i915_execbuffer2_get_context_id(*args);
    u32 exec_start, exec_len;
    u32 mask, flags;
    int ret, mode, i;
    bool need_relocs;

    if (!i915_gem_check_execbuffer(args))
        return -EINVAL;

    ret = validate_exec_list(exec, args->buffer_count);
    if (ret)
        return ret;

    flags = 0;
    if (args->flags & I915_EXEC_SECURE) {

        flags |= I915_DISPATCH_SECURE;
    }
    if (args->flags & I915_EXEC_IS_PINNED)
        flags |= I915_DISPATCH_PINNED;

    switch (args->flags & I915_EXEC_RING_MASK) {
    case I915_EXEC_DEFAULT:
    case I915_EXEC_RENDER:
        ring = &dev_priv->ring[RCS];
        break;
    case I915_EXEC_BSD:
        ring = &dev_priv->ring[VCS];
        if (ctx_id != DEFAULT_CONTEXT_ID) {
            DRM_DEBUG("Ring %s doesn't support contexts\n",
                      ring->name);
            return -EPERM;
        }
        break;
    case I915_EXEC_BLT:
        ring = &dev_priv->ring[BCS];
        if (ctx_id != DEFAULT_CONTEXT_ID) {
            DRM_DEBUG("Ring %s doesn't support contexts\n",
                      ring->name);
            return -EPERM;
        }
        break;
    case I915_EXEC_VEBOX:
        ring = &dev_priv->ring[VECS];
        if (ctx_id != DEFAULT_CONTEXT_ID) {
            DRM_DEBUG("Ring %s doesn't support contexts\n",
                      ring->name);
            return -EPERM;
        }
        break;

    default:
        DRM_DEBUG("execbuf with unknown ring: %d\n",
                  (int)(args->flags & I915_EXEC_RING_MASK));
        return -EINVAL;
    }
    if (!intel_ring_initialized(ring)) {
        DRM_DEBUG("execbuf with invalid ring: %d\n",
                  (int)(args->flags & I915_EXEC_RING_MASK));
        return -EINVAL;
    }

    mode = args->flags & I915_EXEC_CONSTANTS_MASK;
    mask = I915_EXEC_CONSTANTS_MASK;
    switch (mode) {
    case I915_EXEC_CONSTANTS_REL_GENERAL:
    case I915_EXEC_CONSTANTS_ABSOLUTE:
    case I915_EXEC_CONSTANTS_REL_SURFACE:
        if (ring == &dev_priv->ring[RCS] &&
            mode != dev_priv->relative_constants_mode) {
            if (INTEL_INFO(dev)->gen < 4)
                return -EINVAL;

            if (INTEL_INFO(dev)->gen > 5 &&
                mode == I915_EXEC_CONSTANTS_REL_SURFACE)
                return -EINVAL;

            /* The HW changed the meaning on this bit on gen6 */
            if (INTEL_INFO(dev)->gen >= 6)
                mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
        }
        break;
    default:
        DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
        return -EINVAL;
    }

    if (args->buffer_count < 1) {
        DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
        return -EINVAL;
    }

    if (args->num_cliprects != 0) {
        if (ring != &dev_priv->ring[RCS]) {
            DRM_DEBUG("clip rectangles are only valid with the render ring\n");
            return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 5) {
            DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
            return -EINVAL;
        }

        if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
            DRM_DEBUG("execbuf with %u cliprects\n",
                      args->num_cliprects);
            return -EINVAL;
        }

        cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
                            GFP_KERNEL);
        if (cliprects == NULL) {
            ret = -ENOMEM;
            goto pre_mutex_err;
        }

        if (copy_from_user(cliprects,
                           to_user_ptr(args->cliprects_ptr),
                           sizeof(*cliprects)*args->num_cliprects)) {
            ret = -EFAULT;
            goto pre_mutex_err;
        }
    }

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
        goto pre_mutex_err;

    if (dev_priv->ums.mm_suspended) {
        mutex_unlock(&dev->struct_mutex);
        ret = -EBUSY;
        goto pre_mutex_err;
    }

    eb = eb_create(args);
    if (eb == NULL) {
        mutex_unlock(&dev->struct_mutex);
        ret = -ENOMEM;
        goto pre_mutex_err;
    }

    /* Look up object handles */
    ret = eb_lookup_objects(eb, exec, args, file);
    if (ret)
        goto err;

    /* take note of the batch buffer before we might reorder the lists */
    batch_obj = list_entry(eb->objects.prev,
                           struct drm_i915_gem_object,
                           exec_list);

    /* Move the objects en-masse into the GTT, evicting if necessary. */
    need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
    ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
    if (ret)
        goto err;

    /* The objects are in their final locations, apply the relocations. */
    if (need_relocs)
        ret = i915_gem_execbuffer_relocate(eb, vm);
    if (ret) {
        if (ret == -EFAULT) {
            ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
                                                    eb, exec, vm);
            BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        }
        if (ret)
            goto err;
    }

    /* Set the pending read domains for the batch buffer to COMMAND */
    if (batch_obj->base.pending_write_domain) {
        DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
        ret = -EINVAL;
        goto err;
    }
    batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

    /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
     * batch" bit. Hence we need to pin secure batches into the global gtt.
     * hsw should have this fixed, but let's be paranoid and do it
     * unconditionally for now. */
    if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
        i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

    ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
    if (ret)
        goto err;

    ret = i915_switch_context(ring, file, ctx_id);
    if (ret)
        goto err;

    if (ring == &dev_priv->ring[RCS] &&
        mode != dev_priv->relative_constants_mode) {
        ret = intel_ring_begin(ring, 4);
        if (ret)
            goto err;

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, INSTPM);
        intel_ring_emit(ring, mask << 16 | mode);
        intel_ring_advance(ring);

        dev_priv->relative_constants_mode = mode;
    }

    if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
        ret = i915_reset_gen7_sol_offsets(dev, ring);
        if (ret)
            goto err;
    }

    exec_start = i915_gem_obj_offset(batch_obj, vm) +
        args->batch_start_offset;
    exec_len = args->batch_len;
    if (cliprects) {
        for (i = 0; i < args->num_cliprects; i++) {
            ret = i915_emit_box(dev, &cliprects[i],
                                args->DR1, args->DR4);
            if (ret)
                goto err;

            ret = ring->dispatch_execbuffer(ring,
                                            exec_start, exec_len,
                                            flags);
            if (ret)
                goto err;
        }
    } else {
        ret = ring->dispatch_execbuffer(ring,
                                        exec_start, exec_len,
                                        flags);
        if (ret)
            goto err;
    }

    trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

    i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
    i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
    eb_destroy(eb);

    mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
    kfree(cliprects);
    return ret;
}

#if 0
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_execbuffer *args = data;
    struct drm_i915_gem_execbuffer2 exec2;
    struct drm_i915_gem_exec_object *exec_list = NULL;
    struct drm_i915_gem_exec_object2 *exec2_list = NULL;
    int ret, i;

    if (args->buffer_count < 1) {
        DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
        return -EINVAL;
    }

    /* Copy in the exec list from userland */
    exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
    exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
    if (exec_list == NULL || exec2_list == NULL) {
        DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                  args->buffer_count);
        drm_free_large(exec_list);
        drm_free_large(exec2_list);
        return -ENOMEM;
    }
    ret = copy_from_user(exec_list,
                         to_user_ptr(args->buffers_ptr),
                         sizeof(*exec_list) * args->buffer_count);
    if (ret != 0) {
        DRM_DEBUG("copy %d exec entries failed %d\n",
                  args->buffer_count, ret);
        drm_free_large(exec_list);
        drm_free_large(exec2_list);
        return -EFAULT;
    }

    for (i = 0; i < args->buffer_count; i++) {
        exec2_list[i].handle = exec_list[i].handle;
        exec2_list[i].relocation_count = exec_list[i].relocation_count;
        exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
        exec2_list[i].alignment = exec_list[i].alignment;
        exec2_list[i].offset = exec_list[i].offset;
        if (INTEL_INFO(dev)->gen < 4)
            exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
        else
            exec2_list[i].flags = 0;
    }

    exec2.buffers_ptr = args->buffers_ptr;
    exec2.buffer_count = args->buffer_count;
    exec2.batch_start_offset = args->batch_start_offset;
    exec2.batch_len = args->batch_len;
    exec2.DR1 = args->DR1;
    exec2.DR4 = args->DR4;
    exec2.num_cliprects = args->num_cliprects;
    exec2.cliprects_ptr = args->cliprects_ptr;
    exec2.flags = I915_EXEC_RENDER;
    i915_execbuffer2_set_context_id(exec2, 0);

    ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
                                 &dev_priv->gtt.base);
    if (!ret) {
        /* Copy the new buffer offsets back to the user's exec list. */
        for (i = 0; i < args->buffer_count; i++)
            exec_list[i].offset = exec2_list[i].offset;
        /* ... and back out to userspace */
        ret = copy_to_user(to_user_ptr(args->buffers_ptr),
                           exec_list,
                           sizeof(*exec_list) * args->buffer_count);
        if (ret) {
            ret = -EFAULT;
            DRM_DEBUG("failed to copy %d exec entries "
                      "back to user (%d)\n",
                      args->buffer_count, ret);
        }
    }

    drm_free_large(exec_list);
    drm_free_large(exec2_list);
    return ret;
}
#endif

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_execbuffer2 *args = data;
    struct drm_i915_gem_exec_object2 *exec2_list = NULL;
    int ret;

    if (args->buffer_count < 1 ||
        args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
        DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
        return -EINVAL;
    }

    exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
                         GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
    if (exec2_list == NULL) {
        DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                  args->buffer_count);
        return -ENOMEM;
    }
    ret = copy_from_user(exec2_list,
                         (struct drm_i915_relocation_entry __user *)
                         (uintptr_t) args->buffers_ptr,
                         sizeof(*exec2_list) * args->buffer_count);
    if (ret != 0) {
        DRM_DEBUG("copy %d exec entries failed %d\n",
                  args->buffer_count, ret);
        kfree(exec2_list);
        FAIL();
        return -EFAULT;
    }

    ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
                                 &dev_priv->gtt.base);
    if (!ret) {
        /* Copy the new buffer offsets back to the user's exec list. */
        ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
                           exec2_list,
                           sizeof(*exec2_list) * args->buffer_count);
        if (ret) {
            ret = -EFAULT;
            DRM_DEBUG("failed to copy %d exec entries "
                      "back to user (%d)\n",
                      args->buffer_count, ret);
        }
    }

    kfree(exec2_list);
    return ret;
}