/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Chris Wilson
 *
 */

#define iowrite32(v, addr)      writel((v), (addr))

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include <linux/dma_buf.h>

#define I915_EXEC_SECURE        (1<<9)
#define I915_EXEC_IS_PINNED     (1<<10)

#define wmb() asm volatile ("sfence")

struct drm_i915_gem_object *get_fb_obj();

static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
    return (n != 0 && ((n & (n - 1)) == 0));
}

static unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;
}

static unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;
}
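
/*
 * Small hash table mapping execbuffer handles back to GEM objects.
 * The bucket count is a power of two, so "handle & eb->and" selects a
 * bucket; see eb_create() and eb_get_object() below.
 */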
struct eb_objects {
    int and;
    struct hlist_head buckets[0];
};

static struct eb_objects *
eb_create(int size)
{
    struct eb_objects *eb;
    int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
    BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
    while (count > size)
        count >>= 1;
    eb = kzalloc(count*sizeof(struct hlist_head) +
             sizeof(struct eb_objects),
             GFP_KERNEL);
    if (eb == NULL)
        return eb;

    eb->and = count - 1;
    return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
    memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
{
    hlist_add_head(&obj->exec_node,
               &eb->buckets[obj->exec_handle & eb->and]);
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
    struct hlist_head *head;
    struct hlist_node *node;
    struct drm_i915_gem_object *obj;

    head = &eb->buckets[handle & eb->and];
    hlist_for_each(node, head) {
        obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
        if (obj->exec_handle == handle)
            return obj;
    }

    return NULL;
}

static void
eb_destroy(struct eb_objects *eb)
{
    kfree(eb);
}
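
/*
 * Relocations are written through a CPU mapping when the object is already
 * in the CPU write domain, is not mappable and fenceable through the GTT
 * aperture, or is cached; otherwise they are written through the GTT.
 */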
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
    return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
        !obj->map_and_fenceable ||
        obj->cache_level != I915_CACHE_NONE);
}
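
/*
 * Apply a single relocation: validate the target's GPU domains, then patch
 * the target's offset into the batch either via a CPU mapping or through
 * the GTT, depending on use_cpu_reloc().
 */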
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                   struct eb_objects *eb,
                   struct drm_i915_gem_relocation_entry *reloc)
{
    struct drm_device *dev = obj->base.dev;
    struct drm_gem_object *target_obj;
    struct drm_i915_gem_object *target_i915_obj;
    uint32_t target_offset;
    int ret = -EINVAL;

    /* we've already taken a reference to all valid objects */
    target_obj = &eb_get_object(eb, reloc->target_handle)->base;
    if (unlikely(target_obj == NULL))
        return -ENOENT;

    target_i915_obj = to_intel_bo(target_obj);
    target_offset = target_i915_obj->gtt_offset;

    /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
     * pipe_control writes because the gpu doesn't properly redirect them
     * through the ppgtt for non_secure batchbuffers. */
    if (unlikely(IS_GEN6(dev) &&
        reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
        !target_i915_obj->has_global_gtt_mapping)) {
        i915_gem_gtt_bind_object(target_i915_obj,
                     target_i915_obj->cache_level);
    }

    /* Validate that the target is in a valid r/w GPU domain */
    if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
        DRM_DEBUG("reloc with multiple write domains: "
              "obj %p target %d offset %d "
              "read %08x write %08x",
              obj, reloc->target_handle,
              (int) reloc->offset,
              reloc->read_domains,
              reloc->write_domain);
        return ret;
    }
    if (unlikely((reloc->write_domain | reloc->read_domains)
             & ~I915_GEM_GPU_DOMAINS)) {
        DRM_DEBUG("reloc with read/write non-GPU domains: "
              "obj %p target %d offset %d "
              "read %08x write %08x",
              obj, reloc->target_handle,
              (int) reloc->offset,
              reloc->read_domains,
              reloc->write_domain);
        return ret;
    }
    if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
             reloc->write_domain != target_obj->pending_write_domain)) {
        DRM_DEBUG("Write domain conflict: "
              "obj %p target %d offset %d "
              "new %08x old %08x\n",
              obj, reloc->target_handle,
              (int) reloc->offset,
              reloc->write_domain,
              target_obj->pending_write_domain);
        return ret;
    }

    target_obj->pending_read_domains |= reloc->read_domains;
    target_obj->pending_write_domain |= reloc->write_domain;

    /* If the relocation already has the right value in it, no
     * more work needs to be done.
     */
    if (target_offset == reloc->presumed_offset)
        return 0;

    /* Check that the relocation address is valid... */
    if (unlikely(reloc->offset > obj->base.size - 4)) {
        DRM_DEBUG("Relocation beyond object bounds: "
              "obj %p target %d offset %d size %d.\n",
              obj, reloc->target_handle,
              (int) reloc->offset,
              (int) obj->base.size);
        return ret;
    }
    if (unlikely(reloc->offset & 3)) {
        DRM_DEBUG("Relocation not 4-byte aligned: "
              "obj %p target %d offset %d.\n",
              obj, reloc->target_handle,
              (int) reloc->offset);
        return ret;
    }

    /* We can't wait for rendering with pagefaults disabled */
//    if (obj->active && in_atomic())
//        return -EFAULT;

    reloc->delta += target_offset;
    if (use_cpu_reloc(obj)) {
        uint32_t page_offset = reloc->offset & ~PAGE_MASK;
        char *vaddr;

        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
        if (ret)
            return ret;

        vaddr = (char *)MapIoMem((addr_t)i915_gem_object_get_page(obj,
                           reloc->offset >> PAGE_SHIFT), 4096, 3);
        *(uint32_t *)(vaddr + page_offset) = reloc->delta;
        FreeKernelSpace(vaddr);
    } else {
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t __iomem *reloc_entry;
        void __iomem *reloc_page;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
            return ret;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
            return ret;

        /* Map the page containing the relocation we're going to perform. */
        reloc->offset += obj->gtt_offset;
        reloc_page = (void*)MapIoMem(reloc->offset & PAGE_MASK, 4096, 3);
        reloc_entry = (uint32_t __iomem *)
            (reloc_page + (reloc->offset & ~PAGE_MASK));
        iowrite32(reloc->delta, reloc_entry);
        FreeKernelSpace(reloc_page);
    }

    /* and update the user's relocation entry */
    reloc->presumed_offset = target_offset;

    return 0;
}
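
/*
 * Fast-path relocation: relocation entries are copied from user space in
 * chunks onto the stack (N_RELOC at a time) and applied in place, writing
 * the updated presumed offsets back as we go.
 */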
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                    struct eb_objects *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
    struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
    struct drm_i915_gem_relocation_entry __user *user_relocs;
    struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    int remain, ret;

    user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

    remain = entry->relocation_count;
    while (remain) {
        struct drm_i915_gem_relocation_entry *r = stack_reloc;
        int count = remain;
        if (count > ARRAY_SIZE(stack_reloc))
            count = ARRAY_SIZE(stack_reloc);
        remain -= count;

        memcpy(r, user_relocs, count*sizeof(r[0]));

        do {
            u64 offset = r->presumed_offset;

            ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
            if (ret)
                return ret;

            memcpy(&user_relocs->presumed_offset,
                   &r->presumed_offset,
                   sizeof(r->presumed_offset));

            user_relocs++;
            r++;
        } while (--count);
    }

    return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
                     struct eb_objects *eb,
                     struct drm_i915_gem_relocation_entry *relocs)
{
    const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    int i, ret;

    for (i = 0; i < entry->relocation_count; i++) {
        ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
        if (ret)
            return ret;
    }

    return 0;
}

static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
                 struct eb_objects *eb,
                 struct list_head *objects)
{
    struct drm_i915_gem_object *obj;
    int ret = 0;

    /* This is the fast path and we cannot handle a pagefault whilst
     * holding the struct mutex lest the user pass in the relocations
     * contained within a mmaped bo. For in such a case, the page
     * fault handler would call i915_gem_fault() and we would try to
     * acquire the struct mutex again. Obviously this is bad and so
     * lockdep complains vehemently.
     */
//    pagefault_disable();
    list_for_each_entry(obj, objects, exec_list) {
        ret = i915_gem_execbuffer_relocate_object(obj, eb);
        if (ret)
            break;
    }
//    pagefault_enable();

    return ret;
}
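
/* Internal bookkeeping bits kept in exec_entry->flags while an object holds
 * a pin and/or fence reservation for this execbuffer. */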
#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
    struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    return entry->relocation_count && !use_cpu_reloc(obj);
}
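
/*
 * Pin a single object into the GTT for this execbuffer, taking a fence
 * register and an aliasing-PPGTT mapping when required.
 */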
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                   struct intel_ring_buffer *ring)
{
    struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
    struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
    bool need_fence, need_mappable;
    int ret;

//    ENTER();

    need_fence =
        has_fenced_gpu_access &&
        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
        obj->tiling_mode != I915_TILING_NONE;
    need_mappable = need_fence || need_reloc_mappable(obj);

    ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
    if (ret)
    {
        FAIL();
        return ret;
    }

    entry->flags |= __EXEC_OBJECT_HAS_PIN;

    if (has_fenced_gpu_access) {
        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
            ret = i915_gem_object_get_fence(obj);
            if (ret)
            {
                FAIL();
                return ret;
            }

            if (i915_gem_object_pin_fence(obj))
                entry->flags |= __EXEC_OBJECT_HAS_FENCE;

            obj->pending_fenced_gpu_access = true;
        }
    }

    /* Ensure ppgtt mapping exists if needed */
    if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                       obj, obj->cache_level);

        obj->has_aliasing_ppgtt_mapping = 1;
    }

    entry->offset = obj->gtt_offset;
//    LEAVE();

    return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
    struct drm_i915_gem_exec_object2 *entry;

    if (!obj->gtt_space)
        return;

    entry = obj->exec_entry;

    if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
        i915_gem_object_unpin_fence(obj);

    if (entry->flags & __EXEC_OBJECT_HAS_PIN)
        i915_gem_object_unpin(obj);

    entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
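
/*
 * Reserve GTT space for every object in the execbuffer. Objects that need
 * mappable space or a fence are moved to the front of the list, then the
 * whole set is pinned; on -ENOSPC the loop retries once (the eviction call
 * from the upstream driver is stubbed out in this port).
 */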
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                struct drm_file *file,
                struct list_head *objects)
{
    struct drm_i915_gem_object *obj;
    struct list_head ordered_objects;
    bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
    int retry;

//    ENTER();

    INIT_LIST_HEAD(&ordered_objects);
    while (!list_empty(objects)) {
        struct drm_i915_gem_exec_object2 *entry;
        bool need_fence, need_mappable;

        obj = list_first_entry(objects,
                       struct drm_i915_gem_object,
                       exec_list);
        entry = obj->exec_entry;

        need_fence =
            has_fenced_gpu_access &&
            entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
            obj->tiling_mode != I915_TILING_NONE;
        need_mappable = need_fence || need_reloc_mappable(obj);

        if (need_mappable)
            list_move(&obj->exec_list, &ordered_objects);
        else
            list_move_tail(&obj->exec_list, &ordered_objects);

        obj->base.pending_read_domains = 0;
        obj->base.pending_write_domain = 0;
        obj->pending_fenced_gpu_access = false;
    }
    list_splice(&ordered_objects, objects);

    /* Attempt to pin all of the buffers into the GTT.
     * This is done in 3 phases:
     *
     * 1a. Unbind all objects that do not match the GTT constraints for
     *     the execbuffer (fenceable, mappable, alignment etc).
     * 1b. Increment pin count for already bound objects.
     * 2.  Bind new objects.
     * 3.  Decrement pin count.
     *
     * This avoids unnecessary unbinding of later objects in order to make
     * room for the earlier objects *unless* we need to defragment.
     */
    retry = 0;
    do {
        int ret = 0;

        /* Unbind any ill-fitting objects or pin. */
        list_for_each_entry(obj, objects, exec_list) {
            struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
            bool need_fence, need_mappable;

            if (!obj->gtt_space)
                continue;

            need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
            need_mappable = need_fence || need_reloc_mappable(obj);

            if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
                (need_mappable && !obj->map_and_fenceable))
                ret = i915_gem_object_unbind(obj);
            else
                ret = i915_gem_execbuffer_reserve_object(obj, ring);
            if (ret)
                goto err;
        }

        /* Bind fresh objects */
        list_for_each_entry(obj, objects, exec_list) {
            if (obj->gtt_space)
                continue;

            ret = i915_gem_execbuffer_reserve_object(obj, ring);
            if (ret)
                goto err;
        }

err:        /* Decrement pin count for bound objects */
        list_for_each_entry(obj, objects, exec_list)
            i915_gem_execbuffer_unreserve_object(obj);

        if (ret != -ENOSPC || retry++)
        {
//            LEAVE();
            return ret;
        }

//        ret = i915_gem_evict_everything(ring->dev);
        if (ret)
            return ret;
    } while (1);
}
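
/*
 * Slow-path relocation: drop all object references, copy every relocation
 * list from user space with the struct mutex released, then reacquire the
 * objects and apply the relocations from the kernel copy.
 */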
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                  struct drm_file *file,
                  struct intel_ring_buffer *ring,
                  struct list_head *objects,
                  struct eb_objects *eb,
                  struct drm_i915_gem_exec_object2 *exec,
                  int count)
{
    struct drm_i915_gem_relocation_entry *reloc;
    struct drm_i915_gem_object *obj;
    int *reloc_offset;
    int i, total, ret;

    /* We may process another execbuffer during the unlock... */
    while (!list_empty(objects)) {
        obj = list_first_entry(objects,
                       struct drm_i915_gem_object,
                       exec_list);
        list_del_init(&obj->exec_list);
        drm_gem_object_unreference(&obj->base);
    }

    mutex_unlock(&dev->struct_mutex);

    total = 0;
    for (i = 0; i < count; i++)
        total += exec[i].relocation_count;

    reloc_offset = malloc(count * sizeof(*reloc_offset));
    reloc = malloc(total * sizeof(*reloc));
    if (reloc == NULL || reloc_offset == NULL) {
        kfree(reloc);
        kfree(reloc_offset);
        mutex_lock(&dev->struct_mutex);
        return -ENOMEM;
    }

    total = 0;
    for (i = 0; i < count; i++) {
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        u64 invalid_offset = (u64)-1;
        int j;

        user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

        if (copy_from_user(reloc+total, user_relocs,
                   exec[i].relocation_count * sizeof(*reloc))) {
            ret = -EFAULT;
            mutex_lock(&dev->struct_mutex);
            goto err;
        }

        /* As we do not update the known relocation offsets after
         * relocating (due to the complexities in lock handling),
         * we need to mark them as invalid now so that we force the
         * relocation processing next time. Just in case the target
         * object is evicted and then rebound into its old
         * presumed_offset before the next execbuffer - if that
         * happened we would make the mistake of assuming that the
         * relocations were valid.
         */
        for (j = 0; j < exec[i].relocation_count; j++) {
            if (copy_to_user(&user_relocs[j].presumed_offset,
                     &invalid_offset,
                     sizeof(invalid_offset))) {
                ret = -EFAULT;
                mutex_lock(&dev->struct_mutex);
                goto err;
            }
        }

        reloc_offset[i] = total;
        total += exec[i].relocation_count;
    }

    ret = i915_mutex_lock_interruptible(dev);
    if (ret) {
        mutex_lock(&dev->struct_mutex);
        goto err;
    }

    /* reacquire the objects */
    eb_reset(eb);
    for (i = 0; i < count; i++) {

        if(exec[i].handle == -2)
        {
            obj = get_fb_obj();
            drm_gem_object_reference(&obj->base);
        }
        else
            obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                            exec[i].handle));
        if (&obj->base == NULL) {
            DRM_DEBUG("Invalid object handle %d at index %d\n",
                   exec[i].handle, i);
            ret = -ENOENT;
            goto err;
        }

        list_add_tail(&obj->exec_list, objects);
        obj->exec_handle = exec[i].handle;
        obj->exec_entry = &exec[i];
        eb_add_object(eb, obj);
    }

    ret = i915_gem_execbuffer_reserve(ring, file, objects);
    if (ret)
        goto err;

    list_for_each_entry(obj, objects, exec_list) {
        int offset = obj->exec_entry - exec;
        ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
                                reloc + reloc_offset[offset]);
        if (ret)
            goto err;
    }

    /* Leave the user relocations as are, this is the painfully slow path,
     * and we want to avoid the complication of dropping the lock whilst
     * having buffers reserved in the aperture and so causing spurious
     * ENOSPC for random operations.
     */

err:
    kfree(reloc);
    kfree(reloc_offset);
    return ret;
}
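
/*
 * Emit MI_WAIT_FOR_EVENT commands so the batch does not start before any
 * outstanding page flips on the affected display planes have completed.
 */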
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
    u32 plane, flip_mask;
    int ret;

    /* Check for any pending flips. As we only maintain a flip queue depth
     * of 1, we can simply insert a WAIT for the next display flip prior
     * to executing the batch and avoid stalling the CPU.
     */

    for (plane = 0; flips >> plane; plane++) {
        if (((flips >> plane) & 1) == 0)
            continue;

        if (plane)
            flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
        else
            flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;

        ret = intel_ring_begin(ring, 2);
        if (ret)
            return ret;

        intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
    }

    return 0;
}
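
/*
 * Synchronise every object with the target ring and flush outstanding
 * CPU/GTT writes before the batch is dispatched.
 */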
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                struct list_head *objects)
{
    struct drm_i915_gem_object *obj;
    uint32_t flush_domains = 0;
    uint32_t flips = 0;
    int ret;

    list_for_each_entry(obj, objects, exec_list) {
        ret = i915_gem_object_sync(obj, ring);
        if (ret)
            return ret;

        if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
            i915_gem_clflush_object(obj);

        if (obj->base.pending_write_domain)
            flips |= atomic_read(&obj->pending_flip);

        flush_domains |= obj->base.write_domain;
    }

    if (flips) {
        ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
        if (ret)
            return ret;
    }

    if (flush_domains & I915_GEM_DOMAIN_CPU)
        i915_gem_chipset_flush(ring->dev);

    if (flush_domains & I915_GEM_DOMAIN_GTT)
        wmb();

    /* Unconditionally invalidate gpu caches and ensure that we do flush
     * any residual writes from the previous batch.
     */
    return intel_ring_invalidate_all_caches(ring);
}
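
/* The batch start offset and length must both be 8-byte aligned. */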
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
    return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
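
/*
 * Sanity-check the user-supplied exec list; the relocation count is bounded
 * so that the relocation byte length cannot overflow an int. The access_ok()
 * and fault-in checks from the upstream driver are stubbed out in this port.
 */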
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
           int count)
{
    int i;

    for (i = 0; i < count; i++) {
        char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
        int length; /* limited by fault_in_pages_readable() */

        /* First check for malicious input causing overflow */
        if (exec[i].relocation_count >
            INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
            return -EINVAL;

        length = exec[i].relocation_count *
            sizeof(struct drm_i915_gem_relocation_entry);
//        if (!access_ok(VERIFY_READ, ptr, length))
//            return -EFAULT;

        /* we may also need to update the presumed offsets */
//        if (!access_ok(VERIFY_WRITE, ptr, length))
//            return -EFAULT;

//        if (fault_in_multipages_readable(ptr, length))
//            return -EFAULT;
    }

    return 0;
}
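
/*
 * Transfer the pending read/write domains onto each object and move it to
 * the ring's active list once the batch has been submitted.
 */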
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
                   struct intel_ring_buffer *ring)
{
    struct drm_i915_gem_object *obj;

    list_for_each_entry(obj, objects, exec_list) {
        u32 old_read = obj->base.read_domains;
        u32 old_write = obj->base.write_domain;

        obj->base.read_domains = obj->base.pending_read_domains;
        obj->base.write_domain = obj->base.pending_write_domain;
        obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

        i915_gem_object_move_to_active(obj, ring);
        if (obj->base.write_domain) {
            obj->dirty = 1;
            obj->last_write_seqno = intel_ring_get_seqno(ring);
            if (obj->pin_count) /* check for potential scanout */
                intel_mark_fb_busy(obj);
        }

        trace_i915_gem_object_change_domain(obj, old_read, old_write);
    }
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                    struct drm_file *file,
                    struct intel_ring_buffer *ring)
{
    /* Unconditionally force add_request to emit a full flush. */
    ring->gpu_caches_dirty = true;

    /* Add a breadcrumb for the completion of the batch buffer */
    (void)i915_add_request(ring, file, NULL);
}
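
/*
 * On gen7 render, reset the four SO_WRITE_OFFSET registers to zero with
 * MI_LOAD_REGISTER_IMM; used when I915_EXEC_GEN7_SOL_RESET is requested.
 */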
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
                struct intel_ring_buffer *ring)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret, i;

    if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
        return 0;

    ret = intel_ring_begin(ring, 4 * 3);
    if (ret)
        return ret;

    for (i = 0; i < 4; i++) {
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
        intel_ring_emit(ring, 0);
    }

    intel_ring_advance(ring);

    return 0;
}
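
/*
 * Main execbuffer path: validate the request, look up and pin all objects,
 * apply relocations (falling back to the slow path on -EFAULT), flush
 * caches, switch context and dispatch the batch on the selected ring.
 */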
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
               struct drm_file *file,
               struct drm_i915_gem_execbuffer2 *args,
               struct drm_i915_gem_exec_object2 *exec)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct list_head objects;
    struct eb_objects *eb;
    struct drm_i915_gem_object *batch_obj;
    struct drm_clip_rect *cliprects = NULL;
    struct intel_ring_buffer *ring;
    u32 ctx_id = i915_execbuffer2_get_context_id(*args);
    u32 exec_start, exec_len;
    u32 mask;
    u32 flags;
    int ret, mode, i;

    if (!i915_gem_check_execbuffer(args)) {
        DRM_DEBUG("execbuf with invalid offset/length\n");
        FAIL();
        return -EINVAL;
    }

    ret = validate_exec_list(exec, args->buffer_count);
    if (ret)
    {
        FAIL();
        return ret;
    }

    flags = 0;
    if (args->flags & I915_EXEC_SECURE) {
//        if (!file->is_master || !capable(CAP_SYS_ADMIN))
//            return -EPERM;

        flags |= I915_DISPATCH_SECURE;
    }
    if (args->flags & I915_EXEC_IS_PINNED)
        flags |= I915_DISPATCH_PINNED;

    switch (args->flags & I915_EXEC_RING_MASK) {
    case I915_EXEC_DEFAULT:
    case I915_EXEC_RENDER:
        ring = &dev_priv->ring[RCS];
        break;
    case I915_EXEC_BSD:
        ring = &dev_priv->ring[VCS];
        if (ctx_id != 0) {
            DRM_DEBUG("Ring %s doesn't support contexts\n",
                  ring->name);
            FAIL();
            return -EPERM;
        }
        break;
    case I915_EXEC_BLT:
        ring = &dev_priv->ring[BCS];
        if (ctx_id != 0) {
            DRM_DEBUG("Ring %s doesn't support contexts\n",
                  ring->name);
            return -EPERM;
        }
        break;
    default:
        DRM_DEBUG("execbuf with unknown ring: %d\n",
              (int)(args->flags & I915_EXEC_RING_MASK));
        return -EINVAL;
    }
    if (!intel_ring_initialized(ring)) {
        DRM_DEBUG("execbuf with invalid ring: %d\n",
              (int)(args->flags & I915_EXEC_RING_MASK));
        return -EINVAL;
    }

    mode = args->flags & I915_EXEC_CONSTANTS_MASK;
    mask = I915_EXEC_CONSTANTS_MASK;
    switch (mode) {
    case I915_EXEC_CONSTANTS_REL_GENERAL:
    case I915_EXEC_CONSTANTS_ABSOLUTE:
    case I915_EXEC_CONSTANTS_REL_SURFACE:
        if (ring == &dev_priv->ring[RCS] &&
            mode != dev_priv->relative_constants_mode) {
            if (INTEL_INFO(dev)->gen < 4)
                return -EINVAL;

            if (INTEL_INFO(dev)->gen > 5 &&
                mode == I915_EXEC_CONSTANTS_REL_SURFACE)
                return -EINVAL;

            /* The HW changed the meaning on this bit on gen6 */
            if (INTEL_INFO(dev)->gen >= 6)
                mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
        }
        break;
    default:
        DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
        return -EINVAL;
    }

    if (args->buffer_count < 1) {
        DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
        return -EINVAL;
    }

    if (args->num_cliprects != 0) {
        if (ring != &dev_priv->ring[RCS]) {
            DRM_DEBUG("clip rectangles are only valid with the render ring\n");
            return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 5) {
            DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
            return -EINVAL;
        }

        if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
            DRM_DEBUG("execbuf with %u cliprects\n",
                  args->num_cliprects);
            return -EINVAL;
        }

        cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
                    GFP_KERNEL);
        if (cliprects == NULL) {
            ret = -ENOMEM;
            goto pre_mutex_err;
        }

        if (copy_from_user(cliprects,
                   (struct drm_clip_rect __user *)(uintptr_t)
                   args->cliprects_ptr,
                   sizeof(*cliprects)*args->num_cliprects)) {
            ret = -EFAULT;
            goto pre_mutex_err;
        }
    }

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
        goto pre_mutex_err;

    if (dev_priv->mm.suspended) {
        mutex_unlock(&dev->struct_mutex);
        ret = -EBUSY;
        goto pre_mutex_err;
    }

    eb = eb_create(args->buffer_count);
    if (eb == NULL) {
        mutex_unlock(&dev->struct_mutex);
        ret = -ENOMEM;
        goto pre_mutex_err;
    }

    /* Look up object handles */
    INIT_LIST_HEAD(&objects);
    for (i = 0; i < args->buffer_count; i++) {
        struct drm_i915_gem_object *obj;

        if(exec[i].handle == -2)
        {
            obj = get_fb_obj();
            drm_gem_object_reference(&obj->base);
        }
        else
            obj = to_intel_bo(drm_gem_object_lookup(dev, file,
                            exec[i].handle));

//        printf("%s object %p handle %d\n", __FUNCTION__, obj, exec[i].handle);

        if (&obj->base == NULL) {
            DRM_DEBUG("Invalid object handle %d at index %d\n",
                   exec[i].handle, i);
            /* prevent error path from reading uninitialized data */
            ret = -ENOENT;
            goto err;
        }

        if (!list_empty(&obj->exec_list)) {
            DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                   obj, exec[i].handle, i);
            ret = -EINVAL;
            goto err;
        }

        list_add_tail(&obj->exec_list, &objects);
        obj->exec_handle = exec[i].handle;
        obj->exec_entry = &exec[i];
        eb_add_object(eb, obj);
    }

    /* take note of the batch buffer before we might reorder the lists */
    batch_obj = list_entry(objects.prev,
                   struct drm_i915_gem_object,
                   exec_list);

    /* Move the objects en-masse into the GTT, evicting if necessary. */
    ret = i915_gem_execbuffer_reserve(ring, file, &objects);
    if (ret)
        goto err;

    /* The objects are in their final locations, apply the relocations. */
    ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
    if (ret) {
        if (ret == -EFAULT) {
            ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
                                &objects, eb,
                                exec,
                                args->buffer_count);
            BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        }
        if (ret)
            goto err;
    }

    /* Set the pending read domains for the batch buffer to COMMAND */
    if (batch_obj->base.pending_write_domain) {
        DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
        ret = -EINVAL;
        goto err;
    }
    batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

    /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
     * batch" bit. Hence we need to pin secure batches into the global gtt.
     * hsw should have this fixed, but let's be paranoid and do it
     * unconditionally for now. */
    if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
        i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

    ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
    if (ret)
        goto err;

    ret = i915_switch_context(ring, file, ctx_id);
    if (ret)
        goto err;

    if (ring == &dev_priv->ring[RCS] &&
        mode != dev_priv->relative_constants_mode) {
        ret = intel_ring_begin(ring, 4);
        if (ret)
            goto err;

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, INSTPM);
        intel_ring_emit(ring, mask << 16 | mode);
        intel_ring_advance(ring);

        dev_priv->relative_constants_mode = mode;
    }

    if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
        ret = i915_reset_gen7_sol_offsets(dev, ring);
        if (ret)
            goto err;
    }

    exec_start = batch_obj->gtt_offset + args->batch_start_offset;
    exec_len = args->batch_len;
    if (cliprects) {
//        for (i = 0; i < args->num_cliprects; i++) {
//            ret = i915_emit_box(dev, &cliprects[i],
//                        args->DR1, args->DR4);
//            if (ret)
//                goto err;

//            ret = ring->dispatch_execbuffer(ring,
//                            exec_start, exec_len,
//                            flags);
//            if (ret)
//                goto err;
//        }
    } else {
        ret = ring->dispatch_execbuffer(ring,
                        exec_start, exec_len,
                        flags);
        if (ret)
            goto err;
    }

    trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

    i915_gem_execbuffer_move_to_active(&objects, ring);
    i915_gem_execbuffer_retire_commands(dev, file, ring);

err:
    eb_destroy(eb);
    while (!list_empty(&objects)) {
        struct drm_i915_gem_object *obj;

        obj = list_first_entry(&objects,
                       struct drm_i915_gem_object,
                       exec_list);
        list_del_init(&obj->exec_list);
        drm_gem_object_unreference(&obj->base);
    }

    mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
    kfree(cliprects);

    return ret;
}
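
/*
 * DRM_IOCTL_I915_GEM_EXECBUFFER2 entry point: copy the exec list in from
 * user space, run the batch, and copy the updated offsets back out.
 */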
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
             struct drm_file *file)
{
    struct drm_i915_gem_execbuffer2 *args = data;
    struct drm_i915_gem_exec_object2 *exec2_list = NULL;
    int ret;

//    ENTER();

    if (args->buffer_count < 1 ||
        args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
        DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
        FAIL();
        return -EINVAL;
    }

    exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count);

//    if (exec2_list == NULL)
//        exec2_list = drm_malloc_ab(sizeof(*exec2_list),
//                       args->buffer_count);
    if (exec2_list == NULL) {
        DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
              args->buffer_count);
        FAIL();
        return -ENOMEM;
    }
    ret = copy_from_user(exec2_list,
                 (struct drm_i915_relocation_entry __user *)
                 (uintptr_t) args->buffers_ptr,
                 sizeof(*exec2_list) * args->buffer_count);
    if (ret != 0) {
        DRM_DEBUG("copy %d exec entries failed %d\n",
              args->buffer_count, ret);
        kfree(exec2_list);
        FAIL();
        return -EFAULT;
    }

    ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
    if (!ret) {
        /* Copy the new buffer offsets back to the user's exec list. */
        ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
                   exec2_list,
                   sizeof(*exec2_list) * args->buffer_count);
        if (ret) {
            ret = -EFAULT;
            DRM_DEBUG("failed to copy %d exec entries "
                  "back to user (%d)\n",
                  args->buffer_count, ret);
        }
    }

    kfree(exec2_list);

//    LEAVE();

    return ret;
}