/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström
 *          Keith Whitwell
 *          Eric Anholt
 *          Dave Airlie
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include
#include
#include
#include
#include
#include
#include
#include
//#include
#include

#include "errno.h"
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "intel_aub.h"
#include "string.h"

#include "i915_drm.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))

#if 0
#define DBG(...) do {					\
	fprintf(stderr, __VA_ARGS__);			\
} while (0)
#else
#define DBG(...)
#endif

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	int fd;

	int max_relocs;

//	pthread_mutex_t lock;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;
	int exec_size;
	int exec_count;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;

	drmMMListHead named;
	drmMMListHead vma_cache;
	int vma_count, vma_open, vma_max;

	uint64_t gtt_size;
	int available_fences;
	int pci_device;
	int gen;
	unsigned int has_bsd : 1;
	unsigned int has_blt : 1;
	unsigned int has_relaxed_fencing : 1;
	unsigned int has_llc : 1;
	unsigned int has_wait_timeout : 1;
	unsigned int bo_reuse : 1;
	unsigned int no_exec : 1;
	unsigned int has_vebox : 1;
	bool fenced_relocs;

	char *aub_filename;
	FILE *aub_file;
	uint32_t aub_offset;
} drm_intel_bufmgr_gem;
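
/*
 * Illustrative note (not part of the original file): the 14 * 4 bucket
 * array matches upstream libdrm's init_cache_buckets(), which registers
 * buckets of 4 KiB, 8 KiB and 12 KiB, then four size classes per power
 * of two (size, +1/4, +1/2, +3/4) up to 64 MiB, giving size lookups a
 * reasonably tight class without a huge table.
 */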

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	drm_intel_bo bo;

	atomic_t refcount;
	uint32_t gem_handle;
	const char *name;

	/**
	 * Kernel-assigned global name for this object
	 *
	 * List contains both flink named and prime fd'd objects
	 */
	unsigned int global_name;
	drmMMListHead name_list;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */
	int validate_index;

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	time_t free_time;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	int reloc_count;
	/** Mapped address for the buffer, saved across map/unmap cycles */
	void *mem_virtual;
	/** GTT virtual address for the buffer, saved across map/unmap cycles */
	void *gtt_virtual;
	int map_count;
	drmMMListHead vma_list;

	/** BO cache list */
	drmMMListHead head;

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	bool included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	bool used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building
	 * the relocation tree.
	 */
	bool has_error;

	/**
	 * Boolean of whether this buffer can be re-used
	 */
	bool reusable;

	/**
	 * Boolean of whether the GPU is definitely not accessing the buffer.
	 *
	 * This is only valid when reusable, since non-reusable
	 * buffers are those that have been shared with other
	 * processes, so we don't know their state.
	 */
	bool idle;

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */
	int reloc_tree_size;

	/**
	 * Number of potential fence registers required by this buffer and its
	 * relocations.
	 */
	int reloc_tree_fences;

	/** Whether we may need to do the SW_FINISH ioctl on unmap. */
	bool mapped_cpu_write;

	uint32_t aub_offset;

	drm_intel_aub_annotation *aub_annotations;
	unsigned aub_annotation_count;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
{
	unsigned long min_size, max_size;
	unsigned long i;

	if (*tiling_mode == I915_TILING_NONE)
		return size;

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
	} else {
		min_size = 512*1024;
		max_size = 64*1024*1024;
	}

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;
		return size;
	}

	/* Do we need to allocate every page for the fence? */
	if (bufmgr_gem->has_relaxed_fencing)
		return ROUND_UP_TO(size, 4096);

	for (i = min_size; i < size; i <<= 1)
		;

	return i;
}
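
/*
 * Worked example (illustrative, not part of the original file): for a
 * 300 KiB X-tiled request, gen4+ only rounds up to the page size, while
 * gen2/3 without relaxed fencing round up to a power of two no smaller
 * than the fence minimum:
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long sz =
 *		drm_intel_gem_bo_tile_size(bufmgr_gem, 300*1024, &tiling);
 *	// gen4+: ROUND_UP_TO(300*1024, 4096)
 *	// gen3:  1 MiB,  gen2: 512 KiB
 */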

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t *tiling_mode)
{
	unsigned long tile_width;
	unsigned long i;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (*tiling_mode == I915_TILING_X
	    || (IS_915(bufmgr_gem->pci_device)
		&& *tiling_mode == I915_TILING_Y))
		tile_width = 512;
	else
		tile_width = 128;

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* The older hardware has a maximum pitch of 8192 with tiled
	 * surfaces, so fallback to untiled if it's too large.
	 */
	if (pitch > 8192) {
		*tiling_mode = I915_TILING_NONE;
		return ALIGN(pitch, 64);
	}

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
		;

	return i;
}
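
/*
 * Worked example (illustrative): a 100-pixel-wide, 32bpp X-tiled
 * surface has a 400-byte natural pitch; pre-965 hardware needs a
 * power-of-two multiple of the 512-byte tile width, so 512 comes back:
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch =
 *		drm_intel_gem_bo_tile_pitch(bufmgr_gem, 100 * 4, &tiling);
 *	// pitch == 512 on gen2/3; gen4+ also rounds up to the tile width
 */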

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
	int i;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}
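
/*
 * Example (illustrative): with the ascending bucket ladder described
 * above, a 5000-byte request returns the first bucket whose size is
 * >= the request — the 8192-byte bucket — so cached reuse wastes at
 * most the gap to the next size class.
 */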

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i, j;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
			    bo_gem->name);
			continue;
		}

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    i,
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    target_gem->name,
			    target_bo->offset64,
			    bo_gem->relocs[j].delta);
		}
	}
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int index;

	if (bo_gem->validate_index != -1)
		return;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;
}

static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int index;

	if (bo_gem->validate_index != -1) {
		if (need_fence)
			bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
				EXEC_OBJECT_NEEDS_FENCE;
		return;
	}

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		if (new_size == 0)
			new_size = 5;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;
	}

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = 0;
	bufmgr_gem->exec2_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec2_objects[index].flags = 0;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
	if (need_fence) {
		bufmgr_gem->exec2_objects[index].flags |=
			EXEC_OBJECT_NEEDS_FENCE;
	}
	bufmgr_gem->exec_count++;
}
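
/*
 * Note (illustrative): both validate lists grow geometrically
 * (5, 10, 20, ...), so appending N buffers costs O(N) amortized
 * realloc work rather than O(N^2) for a fixed-size increment.
 */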

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	int size;

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture.  Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
		int min_size;

		if (bufmgr_gem->has_relaxed_fencing) {
			if (bufmgr_gem->gen == 3)
				min_size = 1024*1024;
			else
				min_size = 512*1024;

			while (min_size < size)
				min_size *= 2;
		} else
			min_size = size;

		/* Account for worst-case alignment. */
		size = 2 * min_size;
	}

	bo_gem->reloc_tree_size = size;
}
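
/*
 * Worked example (illustrative): a 700 KiB X-tiled buffer on gen3 with
 * relaxed fencing gets min_size rounded up to 1 MiB, so the aperture
 * estimate reserves 2 MiB for the worst-case alignment hole.
 */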

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = true;

		free(bo_gem->relocs);
		bo_gem->relocs = NULL;

		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

		return 1;
	}

	return 0;
}
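
/*
 * Rationale (illustrative): each relocation patches one 32-bit dword
 * in the buffer, so a bo of size S can hold at most S/4 relocations;
 * the cap above keeps the reloc arrays from over-allocating for small
 * batch buffers.
 */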

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;
	int ret;

	if (bo_gem->reusable && bo_gem->idle)
		return false;

	VG_CLEAR(busy);
	busy.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	if (ret == 0) {
		bo_gem->idle = !busy.busy;
		return busy.busy;
	} else {
		return false;
	}
}

static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)
{
	struct drm_i915_gem_madvise madv;

	VG_CLEAR(madv);
	madv.handle = bo_gem->gem_handle;
	madv.madv = state;
	madv.retained = 1;
//	drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;
}
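
/*
 * Note (illustrative): with the MADVISE ioctl commented out in this
 * port, madv.retained keeps the value set above, so the helper always
 * reports the backing pages as retained.
 */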

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,
		 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)
{
	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
			break;

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
	}
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				const char *name,
				unsigned long size,
				unsigned long flags,
				uint32_t tiling_mode,
				unsigned long stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = 4096;
	int ret;
	struct drm_intel_gem_bo_bucket *bucket;
	bool alloc_from_cache;
	unsigned long bo_size;
	bool for_render = false;

	if (flags & BO_ALLOC_FOR_RENDER)
		for_render = true;

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 * allocation up.
	 */
	if (bucket == NULL) {
		bo_size = size;
		if (bo_size < page_size)
			bo_size = page_size;
	} else {
		bo_size = bucket->size;
	}

//	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
retry:
	alloc_from_cache = false;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		if (for_render) {
			/* Allocate new render-target BOs from the tail (MRU)
			 * of the list, as it will likely be hot in the GPU
			 * cache and in the aperture for us.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.prev, head);
			DRMLISTDEL(&bo_gem->head);
			alloc_from_cache = true;
		} else {
			/* For non-render-target BOs (where we're probably
			 * going to map it first thing in order to fill it
			 * with data), check if the last BO in the cache is
			 * unbusy, and only reuse in that case.  Otherwise,
			 * allocating a new buffer is probably faster than
			 * waiting for the GPU to finish.
			 */
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
				alloc_from_cache = true;
				DRMLISTDEL(&bo_gem->head);
			}
		}

		if (alloc_from_cache) {
			if (!drm_intel_gem_bo_madvise_internal
			    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
								    bucket);
				goto retry;
			}

			if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
								 tiling_mode,
								 stride)) {
				drm_intel_gem_bo_free(&bo_gem->bo);
				goto retry;
			}
		}
	}
//	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));
		if (!bo_gem)
			return NULL;

		bo_gem->bo.size = bo_size;

		VG_CLEAR(create);
		create.size = bo_size;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_CREATE,
			       &create);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;
		if (ret != 0) {
			free(bo_gem);
			return NULL;
		}
		bo_gem->bo.bufmgr = bufmgr;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		bo_gem->stride = 0;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
							 tiling_mode,
							 stride)) {
			drm_intel_gem_bo_free(&bo_gem->bo);
			return NULL;
		}

		DRMINITLISTHEAD(&bo_gem->name_list);
		DRMINITLISTHEAD(&bo_gem->vma_list);
	}

	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = true;
	bo_gem->aub_annotations = NULL;
	bo_gem->aub_annotation_count = 0;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

	return &bo_gem->bo;
}
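
/*
 * Usage sketch (illustrative, via the public intel_bufmgr API that
 * wraps this function):
 *
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 8192, 4096);
 *	...
 *	drm_intel_bo_unreference(bo);
 *
 * An 8192-byte request lands in the 8192-byte cache bucket, so an
 * identical allocation right after the unreference is normally served
 * from the cache without creating a new GEM object.
 */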

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned long size,
				  unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       BO_ALLOC_FOR_RENDER,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       const char *name,
		       unsigned long size,
		       unsigned int alignment)
{
	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	uint32_t tiling;

	do {
		unsigned long aligned_y, height_alignment;

		tiling = *tiling_mode;

		/* If we're tiled, our allocations are in 8 or 32-row blocks,
		 * so failure to align our height means that we won't allocate
		 * enough pages.
		 *
		 * If we're untiled, we still have to align to 2 rows high
		 * because the data port accesses 2x2 blocks even if the
		 * bottom row isn't to be rendered, so failure to align means
		 * we could walk off the end of the GTT and fault.  This is
		 * documented on 965, and may be the case on older chipsets
		 * too so we try to be careful.
		 */
		aligned_y = y;
		height_alignment = 2;

		if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
			height_alignment = 16;
		else if (tiling == I915_TILING_X
			 || (IS_915(bufmgr_gem->pci_device)
			     && tiling == I915_TILING_Y))
			height_alignment = 8;
		else if (tiling == I915_TILING_Y)
			height_alignment = 32;
		aligned_y = ALIGN(y, height_alignment);

		stride = x * cpp;
		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);
	*pitch = stride;

	if (tiling == I915_TILING_NONE)
		stride = 0;

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					       tiling, stride);
}
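
/*
 * Usage sketch (illustrative): allocating a 256x256, 32bpp surface and
 * letting the bufmgr downgrade the tiling mode if hardware limits
 * require it:
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo =
 *		drm_intel_bo_alloc_tiled(bufmgr, "surface", 256, 256, 4,
 *					 &tiling, &pitch, 0);
 *	// on return, tiling and pitch report what was actually chosen
 */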

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
				  const char *name,
				  unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	/* At the moment most applications only have a few named bos.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named.  And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->global_name == handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			return &bo_gem->bo;
		}
	}

	VG_CLEAR(open_arg);
	open_arg.name = handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_GEM_OPEN,
		       &open_arg);
	if (ret != 0) {
		DBG("Couldn't reference %s handle 0x%08x: %s\n",
		    name, handle, strerror(errno));
		return NULL;
	}
	/* Now see if someone has used a prime handle to get this
	 * object from the kernel before by looking through the list
	 * again for a matching gem_handle
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->gem_handle == open_arg.handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			return &bo_gem->bo;
		}
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.offset64 = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = name;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->bo.handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = false;

	VG_CLEAR(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

	return &bo_gem->bo;
}
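
/*
 * Usage sketch (illustrative): importing a buffer that another process
 * published with flink; the two list walks above make repeated imports
 * of the same name return the same drm_intel_bo:
 *
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "scanout", name);
 *	if (shared == NULL)
 *		return;		// stale name or ioctl failure
 */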

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;
	int ret;

	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual) {
		VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
		bufmgr_gem->vma_count--;
	}
	if (bo_gem->gtt_virtual) {
		bufmgr_gem->vma_count--;
	}

	/* Close this object */
	VG_CLEAR(close);
	close.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
	if (ret != 0) {
		DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	}
	free(bo_gem->aub_annotations);
	free(bo);
}

static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#if HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->mem_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

	if (bo_gem->gtt_virtual)
		VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
	int i;

	if (bufmgr_gem->time == time)
		return;

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)
				break;

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	bufmgr_gem->time = time;
}
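
/*
 * Note (illustrative): buffers are appended to a bucket on release, so
 * each list stays ordered by free_time and the loop can stop at the
 * first entry younger than about one second instead of scanning the
 * whole bucket.
 */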

static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int limit;

	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

	if (bufmgr_gem->vma_max < 0)
		return;

	/* We may need to evict a few entries in order to create new mmaps */
	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
	if (limit < 0)
		limit = 0;

	while (bufmgr_gem->vma_count > limit) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bufmgr_gem->vma_cache.next,
				      vma_list);
		assert(bo_gem->map_count == 0);
		DRMLISTDELINIT(&bo_gem->vma_list);

		if (bo_gem->mem_virtual) {
//			munmap(bo_gem->mem_virtual, bo_gem->bo.size);
			bo_gem->mem_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
		if (bo_gem->gtt_virtual) {
//			munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
			bo_gem->gtt_virtual = NULL;
			bufmgr_gem->vma_count--;
		}
	}
}
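
/*
 * Note (illustrative): the eviction target of vma_max - 2*vma_open
 * presumably keeps two mmap slots free per open mapping, so a bo being
 * mapped can hold both a CPU and a GTT mapping without tripping the
 * per-process vma limit.
 */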

static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				       drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open--;
	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count++;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count++;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)
{
	bufmgr_gem->vma_open++;
	DRMLISTDEL(&bo_gem->vma_list);
	if (bo_gem->mem_virtual)
		bufmgr_gem->vma_count--;
	if (bo_gem->gtt_virtual)
		bufmgr_gem->vma_count--;
	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;
	int i;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
		}
	}
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = false;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;
	}
	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;
	}

	/* Clear any left-over mappings */
	if (bo_gem->map_count) {
		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
		bo_gem->map_count = 0;
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	}

	DRMLISTDEL(&bo_gem->name_list);

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->name = NULL;
		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
	} else {
		drm_intel_gem_bo_free(bo);
	}
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
						      time_t time)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
//		struct timespec time;

//		clock_gettime(CLOCK_MONOTONIC, &time);

//		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, 0);
		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, 0);
//		pthread_mutex_unlock(&bufmgr_gem->lock);
	}
}

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

//	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		VG_CLEAR(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;
		mmap_arg.size = bo->size;
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__, bo_gem->gem_handle,
			    bo_gem->name, strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
//			pthread_mutex_unlock(&bufmgr_gem->lock);
			return ret;
		}
		VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
	}
	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	VG_CLEAR(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
		set_domain.write_domain = 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting to CPU domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	if (write_enable)
		bo_gem->mapped_cpu_write = true;

	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
//	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}
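
/*
 * Usage sketch (illustrative): filling a bo through the CPU mapping:
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {
 *		memset(bo->virtual, 0, bo->size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */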

static int
map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	if (bo_gem->map_count++ == 0)
		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

		VG_CLEAR(mmap_arg);
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.offset = 0;

		/* Get the fake offset back... */
		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_MMAP_GTT,
			       &mmap_arg);
		if (ret != 0) {
			ret = -errno;
			DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			return ret;
		}

		/* and mmap it (this port appears to hand back the mapped
		 * address directly in mmap_arg.offset, so it is used as
		 * the pointer)
		 */
		bo_gem->gtt_virtual = (void *)(uintptr_t) mmap_arg.offset;
		if (bo_gem->gtt_virtual == 0) {
			bo_gem->gtt_virtual = NULL;
			ret = -errno;
			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
			    __FILE__, __LINE__,
			    bo_gem->gem_handle, bo_gem->name,
			    strerror(errno));
			if (--bo_gem->map_count == 0)
				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
			return ret;
		}
	}

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	return 0;
}

int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

//	pthread_mutex_lock(&bufmgr_gem->lock);

	ret = map_gtt(bo);
	if (ret) {
//		pthread_mutex_unlock(&bufmgr_gem->lock);
		return ret;
	}

	/* Now move it to the GTT domain so that the GPU and CPU
	 * caches are flushed and the GPU isn't actively using the
	 * buffer.
	 *
	 * The pagefault handler does this domain change for us when
	 * it has unbound the BO from the GTT, but it's up to us to
	 * tell it when we're about to use things if we had done
	 * rendering and it still happens to be bound to the GTT.
	 */
	VG_CLEAR(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting domain %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    strerror(errno));
	}

	drm_intel_gem_bo_mark_mmaps_incoherent(bo);
	VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
//	pthread_mutex_unlock(&bufmgr_gem->lock);

	return 0;
}

/**
 * Performs a mapping of the buffer object like the normal GTT
 * mapping, but avoids waiting for the GPU to be done reading from or
 * rendering to the buffer.
 *
 * This is used in the implementation of GL_ARB_map_buffer_range: The
 * user asks to create a buffer, then does a mapping, fills some
 * space, runs a drawing command, then asks to map it again without
 * synchronizing because it guarantees that it won't write over the
 * data that the GPU is busy using (or, more specifically, that if it
 * does write over the data, it acknowledges that rendering is
 * undefined).
 */

int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
#ifdef HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
#endif
	int ret;

	/* If the CPU cache isn't coherent with the GTT, then use a
	 * regular synchronized mapping.  The problem is that we don't
	 * track where the buffer was last used on the CPU side in
	 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
	 * we would potentially corrupt the buffer even when the user
	 * does reasonable things.
	 */
	if (!bufmgr_gem->has_llc)
		return drm_intel_gem_bo_map_gtt(bo);

//	pthread_mutex_lock(&bufmgr_gem->lock);
	ret = map_gtt(bo);
//	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret = 0;

	if (bo == NULL)
		return 0;

//	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count <= 0) {
		DBG("attempted to unmap an unmapped bo\n");
//		pthread_mutex_unlock(&bufmgr_gem->lock);
		/* Preserve the old behaviour of just treating this as a
		 * no-op rather than reporting the error.
		 */
		return 0;
	}

	if (bo_gem->mapped_cpu_write) {
		struct drm_i915_gem_sw_finish sw_finish;

		/* Cause a flush to happen if the buffer's pinned for
		 * scanout, so the results show up in a timely manner.
		 * Unlike GTT set domains, this only does work if the
		 * buffer should be scanout-related.
		 */

		bo_gem->mapped_cpu_write = false;
	}

	/* We need to unmap after every invocation as we cannot track
	 * an open vma for every bo as that will exhaust the system
	 * limits and cause later failures.
	 */
	if (--bo_gem->map_count == 0) {
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
		bo->virtual = NULL;
	}
//	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	return drm_intel_gem_bo_unmap(bo);
}

static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	VG_CLEAR(pwrite);
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PWRITE,
		       &pwrite);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}
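
/*
 * Usage sketch (illustrative): uploading a small table with pwrite
 * instead of mapping the whole bo:
 *
 *	static const uint32_t table[4] = { 1, 2, 3, 4 };
 *	drm_intel_bo_subdata(bo, 0, sizeof(table), table);
 */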

#if 0
static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	VG_CLEAR(get_pipe_from_crtc_id);
	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		       &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}

static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	VG_CLEAR(pread);
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PREAD,
		       &pread);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}

#endif
1508 | |||
1509 | /** Waits for all GPU rendering with the object to have completed. */ |
||
1510 | static void |
||
1511 | drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo) |
||
1512 | { |
||
1513 | drm_intel_gem_bo_start_gtt_access(bo, 1); |
||
1514 | } |
||
1515 | |||
1516 | /** |
||
1517 | * Waits on a BO for the given amount of time. |
||
1518 | * |
||
1519 | * @bo: buffer object to wait for |
||
1520 | * @timeout_ns: amount of time to wait in nanoseconds. |
||
1521 | * If value is less than 0, an infinite wait will occur. |
||
1522 | * |
||
1523 | * Returns 0 if the wait was successful ie. the last batch referencing the |
||
1524 | * object has completed within the allotted time. Otherwise some negative return |
||
1525 | * value describes the error. Of particular interest is -ETIME when the wait has |
||
1526 | * failed to yield the desired result. |
||
1527 | * |
||
1528 | * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows |
||
1529 | * the operation to give up after a certain amount of time. Another subtle |
||
1530 | * difference is the internal locking semantics are different (this variant does |
||
1531 | * not hold the lock for the duration of the wait). This makes the wait subject |
||
1532 | * to a larger userspace race window. |
||
1533 | * |
||
1534 | * The implementation shall wait until the object is no longer actively |
||
1535 | * referenced within a batch buffer at the time of the call. The wait will |
||
1536 | * not guarantee that the buffer is re-issued via another thread, or an flinked |
||
1537 | * handle. Userspace must make sure this race does not occur if such precision |
||
1538 | * is important. |
||
1539 | */ |
||
1540 | int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns) |
||
1541 | { |
||
1542 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
1543 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1544 | struct drm_i915_gem_wait wait; |
||
1545 | int ret; |
||
1546 | |||
1547 | if (!bufmgr_gem->has_wait_timeout) { |
||
1548 | DBG("%s:%d: Timed wait is not supported. Falling back to " |
||
1549 | "infinite wait\n", __FILE__, __LINE__); |
||
1550 | if (timeout_ns) { |
||
1551 | drm_intel_gem_bo_wait_rendering(bo); |
||
1552 | return 0; |
||
1553 | } else { |
||
1554 | return drm_intel_gem_bo_busy(bo) ? -ETIME : 0; |
||
1555 | } |
||
1556 | } |
||
1557 | |||
1558 | wait.bo_handle = bo_gem->gem_handle; |
||
1559 | wait.timeout_ns = timeout_ns; |
||
1560 | wait.flags = 0; |
||
1561 | ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait); |
||
1562 | if (ret == -1) |
||
1563 | return -errno; |
||
1564 | |||
1565 | return ret; |
||
1566 | } |
||
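/* Usage sketch (hypothetical caller): a bounded wait before CPU access.
 * The two-second timeout is an arbitrary example value; -ETIME is the
 * documented "still busy" result.
 */
#if 0
static int example_bounded_wait(drm_intel_bo *bo)
{
	int ret = drm_intel_gem_bo_wait(bo, 2000000000LL);

	if (ret == -ETIME)
		DBG("buffer still busy after 2s\n");
	return ret;	/* 0 once idle, negative errno otherwise */
}
#endif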
1567 | |||
1568 | /** |
||
1569 | * Sets the object to the GTT read and possibly write domain, used by the X |
||
1570 | * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt(). |
||
1571 | * |
||
1572 | * In combination with drm_intel_gem_bo_pin() and manual fence management, we |
||
1573 | * can do tiled pixmaps this way. |
||
1574 | */ |
||
1575 | void |
||
1576 | drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable) |
||
1577 | { |
||
1578 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
1579 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1580 | struct drm_i915_gem_set_domain set_domain; |
||
1581 | int ret; |
||
1582 | |||
1583 | VG_CLEAR(set_domain); |
||
1584 | set_domain.handle = bo_gem->gem_handle; |
||
1585 | set_domain.read_domains = I915_GEM_DOMAIN_GTT; |
||
1586 | set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0; |
||
1587 | ret = drmIoctl(bufmgr_gem->fd, |
||
1588 | DRM_IOCTL_I915_GEM_SET_DOMAIN, |
||
1589 | &set_domain); |
||
1590 | if (ret != 0) { |
||
1591 | DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", |
||
1592 | __FILE__, __LINE__, bo_gem->gem_handle, |
||
1593 | set_domain.read_domains, set_domain.write_domain, |
||
1594 | strerror(errno)); |
||
1595 | } |
||
1596 | } |
||
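/* Usage sketch (hypothetical caller): move the buffer to the GTT write
 * domain before a CPU fill through a GTT mapping. Assumes bo->virtual
 * is valid, e.g. after a prior drm_intel_gem_bo_map_gtt().
 */
#if 0
static void example_cpu_clear(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 1);	/* 1 = write domain */
	memset(bo->virtual, 0, bo->size);		/* CPU writes via GTT */
}
#endif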
1597 | |||
1598 | static void |
||
1599 | drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr) |
||
1600 | { |
||
1601 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; |
||
1602 | int i; |
||
1603 | |||
1604 | free(bufmgr_gem->exec2_objects); |
||
1605 | free(bufmgr_gem->exec_objects); |
||
1606 | free(bufmgr_gem->exec_bos); |
||
1607 | free(bufmgr_gem->aub_filename); |
||
1608 | |||
1609 | // pthread_mutex_destroy(&bufmgr_gem->lock); |
||
1610 | |||
1611 | /* Free any cached buffer objects we were going to reuse */ |
||
1612 | for (i = 0; i < bufmgr_gem->num_buckets; i++) { |
||
1613 | struct drm_intel_gem_bo_bucket *bucket = |
||
1614 | &bufmgr_gem->cache_bucket[i]; |
||
1615 | drm_intel_bo_gem *bo_gem; |
||
1616 | |||
1617 | while (!DRMLISTEMPTY(&bucket->head)) { |
||
1618 | bo_gem = DRMLISTENTRY(drm_intel_bo_gem, |
||
1619 | bucket->head.next, head); |
||
1620 | DRMLISTDEL(&bo_gem->head); |
||
1621 | |||
1622 | drm_intel_gem_bo_free(&bo_gem->bo); |
||
1623 | } |
||
1624 | } |
||
1625 | |||
1626 | free(bufmgr); |
||
1627 | } |
||
1628 | |||
1629 | /** |
||
1630 | * Adds the target buffer to the validation list and adds the relocation |
||
1631 | * to the reloc_buffer's relocation list. |
||
1632 | * |
||
1633 | * The relocation entry at the given offset must already contain the |
||
1634 | * precomputed relocation value, because the kernel will optimize out |
||
1635 | * the relocation entry write when the buffer hasn't moved from the |
||
1636 | * last known offset in target_bo. |
||
1637 | */ |
||
1638 | static int |
||
1639 | do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, |
||
1640 | drm_intel_bo *target_bo, uint32_t target_offset, |
||
1641 | uint32_t read_domains, uint32_t write_domain, |
||
1642 | bool need_fence) |
||
1643 | { |
||
1644 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
1645 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1646 | drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; |
||
1647 | bool fenced_command; |
||
1648 | |||
1649 | if (bo_gem->has_error) |
||
1650 | return -ENOMEM; |
||
1651 | |||
1652 | if (target_bo_gem->has_error) { |
||
1653 | bo_gem->has_error = true; |
||
1654 | return -ENOMEM; |
||
1655 | } |
||
1656 | |||
1657 | /* We never use HW fences for rendering on 965+ */ |
||
1658 | if (bufmgr_gem->gen >= 4) |
||
1659 | need_fence = false; |
||
1660 | |||
1661 | fenced_command = need_fence; |
||
1662 | if (target_bo_gem->tiling_mode == I915_TILING_NONE) |
||
1663 | need_fence = false; |
||
1664 | |||
1665 | /* Create a new relocation list if needed */ |
||
1666 | if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo)) |
||
1667 | return -ENOMEM; |
||
1668 | |||
1669 | /* Check overflow */ |
||
1670 | assert(bo_gem->reloc_count < bufmgr_gem->max_relocs); |
||
1671 | |||
1672 | /* Check args */ |
||
1673 | assert(offset <= bo->size - 4); |
||
1674 | assert((write_domain & (write_domain - 1)) == 0); |
||
1675 | |||
1676 | /* Make sure that we're not adding a reloc to something whose size has |
||
1677 | * already been accounted for. |
||
1678 | */ |
||
1679 | assert(!bo_gem->used_as_reloc_target); |
||
1680 | if (target_bo_gem != bo_gem) { |
||
1681 | target_bo_gem->used_as_reloc_target = true; |
||
1682 | bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size; |
||
1683 | } |
||
1684 | /* An object needing a fence is a tiled buffer, so it won't have |
||
1685 | * relocs to other buffers. |
||
1686 | */ |
||
1687 | if (need_fence) |
||
1688 | target_bo_gem->reloc_tree_fences = 1; |
||
1689 | bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences; |
||
1690 | |||
1691 | bo_gem->relocs[bo_gem->reloc_count].offset = offset; |
||
1692 | bo_gem->relocs[bo_gem->reloc_count].delta = target_offset; |
||
1693 | bo_gem->relocs[bo_gem->reloc_count].target_handle = |
||
1694 | target_bo_gem->gem_handle; |
||
1695 | bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; |
||
1696 | bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; |
||
5068 | serge | 1697 | bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64; |
4363 | Serge | 1698 | |
1699 | bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo; |
||
1700 | if (target_bo != bo) |
||
1701 | drm_intel_gem_bo_reference(target_bo); |
||
1702 | if (fenced_command) |
||
1703 | bo_gem->reloc_target_info[bo_gem->reloc_count].flags = |
||
1704 | DRM_INTEL_RELOC_FENCE; |
||
1705 | else |
||
1706 | bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0; |
||
1707 | |||
1708 | bo_gem->reloc_count++; |
||
1709 | |||
1710 | return 0; |
||
1711 | } |
||
1712 | |||
1713 | static int |
||
1714 | drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, |
||
1715 | drm_intel_bo *target_bo, uint32_t target_offset, |
||
1716 | uint32_t read_domains, uint32_t write_domain) |
||
1717 | { |
||
1718 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; |
||
1719 | |||
1720 | return do_bo_emit_reloc(bo, offset, target_bo, target_offset, |
||
1721 | read_domains, write_domain, |
||
1722 | !bufmgr_gem->fenced_relocs); |
||
1723 | } |
||
1724 | |||
1725 | static int |
||
1726 | drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset, |
||
1727 | drm_intel_bo *target_bo, |
||
1728 | uint32_t target_offset, |
||
1729 | uint32_t read_domains, uint32_t write_domain) |
||
1730 | { |
||
1731 | return do_bo_emit_reloc(bo, offset, target_bo, target_offset, |
||
1732 | read_domains, write_domain, true); |
||
1733 | } |
||
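/* Usage sketch (hypothetical batch writer): per the rule above, store
 * the presumed address in the batch dword first, then record the
 * relocation for it. `batch_map` and `dword` are illustrative names;
 * drm_intel_bo_emit_reloc() is the public entry point for this path.
 */
#if 0
static int example_emit_pointer(drm_intel_bo *batch, uint32_t *batch_map,
				unsigned int dword, drm_intel_bo *target)
{
	batch_map[dword] = (uint32_t)target->offset64;	/* precomputed value */
	return drm_intel_bo_emit_reloc(batch, dword * 4,
				       target, 0,
				       I915_GEM_DOMAIN_RENDER, 0);
}
#endif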
1734 | |||
1735 | int |
||
1736 | drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo) |
||
1737 | { |
||
1738 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1739 | |||
1740 | return bo_gem->reloc_count; |
||
1741 | } |
||
1742 | |||
1743 | /** |
||
1744 | * Removes existing relocation entries in the BO after "start". |
||
1745 | * |
||
1746 | * This allows a user to avoid the two-step process of first counting up |
||
1747 | * all the buffer objects and doing a |
||
1748 | * drm_intel_bufmgr_check_aperture_space() before emitting any of the |
||
1749 | * relocations for the state setup. Instead, save the state of the |
||
1750 | * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the |
||
1751 | * state, and then check if it still fits in the aperture. |
||
1752 | * |
||
1753 | * Any further drm_intel_bufmgr_check_aperture_space() queries |
||
1754 | * involving this buffer in the tree are undefined after this call. |
||
1755 | */ |
||
1756 | void |
||
1757 | drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start) |
||
1758 | { |
||
1759 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1760 | int i; |
||
1761 | // struct timespec time; |
||
1762 | |||
1763 | // clock_gettime(CLOCK_MONOTONIC, &time); |
||
1764 | |||
1765 | assert(bo_gem->reloc_count >= start); |
||
1766 | /* Unreference the cleared target buffers */ |
||
1767 | for (i = start; i < bo_gem->reloc_count; i++) { |
||
1768 | drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo; |
||
1769 | if (&target_bo_gem->bo != bo) { |
||
1770 | bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences; |
||
1771 | drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, |
||
1772 | 0); |
||
1773 | } |
||
1774 | } |
||
1775 | bo_gem->reloc_count = start; |
||
1776 | } |
||
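/* Usage sketch of the two-step pattern described above: emit state
 * speculatively, then roll the relocation list back if the batch no
 * longer fits. `emit_some_state()` is a hypothetical callback; the
 * other calls are the real API.
 */
#if 0
static bool example_try_emit(drm_intel_bo *batch)
{
	int saved = drm_intel_gem_bo_get_reloc_count(batch);

	emit_some_state(batch);		/* hypothetical state emission */
	if (drm_intel_bufmgr_check_aperture_space(&batch, 1) != 0) {
		drm_intel_gem_bo_clear_relocs(batch, saved);
		return false;		/* caller flushes and retries */
	}
	return true;
}
#endif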
1777 | |||
1778 | /** |
||
1779 | * Walk the tree of relocations rooted at BO and accumulate the list of |
||
1780 | * validations to be performed and update the relocation buffers with |
||
1781 | * index values into the validation list. |
||
1782 | */ |
||
1783 | static void |
||
1784 | drm_intel_gem_bo_process_reloc(drm_intel_bo *bo) |
||
1785 | { |
||
1786 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1787 | int i; |
||
1788 | |||
1789 | if (bo_gem->relocs == NULL) |
||
1790 | return; |
||
1791 | |||
1792 | for (i = 0; i < bo_gem->reloc_count; i++) { |
||
1793 | drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo; |
||
1794 | |||
1795 | if (target_bo == bo) |
||
1796 | continue; |
||
1797 | |||
1798 | drm_intel_gem_bo_mark_mmaps_incoherent(bo); |
||
1799 | |||
1800 | /* Continue walking the tree depth-first. */ |
||
1801 | drm_intel_gem_bo_process_reloc(target_bo); |
||
1802 | |||
1803 | /* Add the target to the validate list */ |
||
1804 | drm_intel_add_validate_buffer(target_bo); |
||
1805 | } |
||
1806 | } |
||
1807 | |||
1808 | static void |
||
1809 | drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo) |
||
1810 | { |
||
1811 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; |
||
1812 | int i; |
||
1813 | |||
1814 | if (bo_gem->relocs == NULL) |
||
1815 | return; |
||
1816 | |||
1817 | for (i = 0; i < bo_gem->reloc_count; i++) { |
||
1818 | drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo; |
||
1819 | int need_fence; |
||
1820 | |||
1821 | if (target_bo == bo) |
||
1822 | continue; |
||
1823 | |||
1824 | drm_intel_gem_bo_mark_mmaps_incoherent(bo); |
||
1825 | |||
1826 | /* Continue walking the tree depth-first. */ |
||
1827 | drm_intel_gem_bo_process_reloc2(target_bo); |
||
1828 | |||
1829 | need_fence = (bo_gem->reloc_target_info[i].flags & |
||
1830 | DRM_INTEL_RELOC_FENCE); |
||
1831 | |||
1832 | /* Add the target to the validate list */ |
||
1833 | drm_intel_add_validate_buffer2(target_bo, need_fence); |
||
1834 | } |
||
1835 | } |
||
1836 | |||
1837 | |||
1838 | static void |
||
1839 | drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem) |
||
1840 | { |
||
1841 | int i; |
||
1842 | |||
1843 | for (i = 0; i < bufmgr_gem->exec_count; i++) { |
||
1844 | drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; |
||
1845 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1846 | |||
1847 | /* Update the buffer offset */ |
||
5068 | serge | 1848 | if (bufmgr_gem->exec_objects[i].offset != bo->offset64) { |
4363 | Serge | 1849 | DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", |
5068 | serge | 1850 | bo_gem->gem_handle, bo_gem->name, bo->offset64, |
4363 | Serge | 1851 | (unsigned long long)bufmgr_gem->exec_objects[i]. |
1852 | offset); |
||
5068 | serge | 1853 | bo->offset64 = bufmgr_gem->exec_objects[i].offset; |
4363 | Serge | 1854 | bo->offset = bufmgr_gem->exec_objects[i].offset; |
1855 | } |
||
1856 | } |
||
1857 | } |
||
1858 | |||
1859 | static void |
||
1860 | drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem) |
||
1861 | { |
||
1862 | int i; |
||
1863 | |||
1864 | for (i = 0; i < bufmgr_gem->exec_count; i++) { |
||
1865 | drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; |
||
1866 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; |
||
1867 | |||
1868 | /* Update the buffer offset */ |
||
5068 | serge | 1869 | if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) { |
4363 | Serge | 1870 | DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", |
5068 | serge | 1871 | bo_gem->gem_handle, bo_gem->name, bo->offset64, |
4363 | Serge | 1872 | (unsigned long long)bufmgr_gem->exec2_objects[i].offset); |
5068 | serge | 1873 | bo->offset64 = bufmgr_gem->exec2_objects[i].offset; |
4363 | Serge | 1874 | bo->offset = bufmgr_gem->exec2_objects[i].offset; |
1875 | } |
||
1876 | } |
||
1877 | } |
||
1878 | |||
1879 | static void |
||
1880 | aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data) |
||
1881 | { |
||
1882 | fwrite(&data, 1, 4, bufmgr_gem->aub_file); |
||
1883 | } |
||
1884 | |||
1885 | static void |
||
1886 | aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size) |
||
1887 | { |
||
1888 | fwrite(data, 1, size, bufmgr_gem->aub_file); |
||
1889 | } |
||
1890 | |||
1891 | static void |
||
1892 | aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size) |
||
1893 | { |
||
1894 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
1895 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1896 | uint32_t *data; |
||
1897 | unsigned int i; |
||
1898 | |||
1899 | data = malloc(bo->size); |
||
1900 | drm_intel_bo_get_subdata(bo, offset, size, data); |
||
1901 | |||
1902 | /* Easy mode: write out bo with no relocations */ |
||
1903 | if (!bo_gem->reloc_count) { |
||
1904 | aub_out_data(bufmgr_gem, data, size); |
||
1905 | free(data); |
||
1906 | return; |
||
1907 | } |
||
1908 | |||
1909 | /* Otherwise, handle the relocations while writing. */ |
||
1910 | for (i = 0; i < size / 4; i++) { |
||
1911 | int r; |
||
1912 | for (r = 0; r < bo_gem->reloc_count; r++) { |
||
1913 | struct drm_i915_gem_relocation_entry *reloc; |
||
1914 | drm_intel_reloc_target *info; |
||
1915 | |||
1916 | reloc = &bo_gem->relocs[r]; |
||
1917 | info = &bo_gem->reloc_target_info[r]; |
||
1918 | |||
1919 | if (reloc->offset == offset + i * 4) { |
||
1920 | drm_intel_bo_gem *target_gem; |
||
1921 | uint32_t val; |
||
1922 | |||
1923 | target_gem = (drm_intel_bo_gem *)info->bo; |
||
1924 | |||
1925 | val = reloc->delta; |
||
1926 | val += target_gem->aub_offset; |
||
1927 | |||
1928 | aub_out(bufmgr_gem, val); |
||
1929 | data[i] = val; |
||
1930 | break; |
||
1931 | } |
||
1932 | } |
||
1933 | if (r == bo_gem->reloc_count) { |
||
1934 | /* no relocation, just the data */ |
||
1935 | aub_out(bufmgr_gem, data[i]); |
||
1936 | } |
||
1937 | } |
||
1938 | |||
1939 | free(data); |
||
1940 | } |
||
1941 | |||
1942 | static void |
||
1943 | aub_bo_get_address(drm_intel_bo *bo) |
||
1944 | { |
||
1945 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
1946 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1947 | |||
1948 | /* Give the object a graphics address in the AUB file. We |
||
1949 | * don't just use the GEM object address because we do AUB |
||
1950 | * dumping before execution -- we want to successfully log |
||
1951 | * when the hardware might hang, and we might even want to aub |
||
1952 | * capture for a driver trying to execute on a different |
||
1953 | * generation of hardware by disabling the actual kernel exec |
||
1954 | * call. |
||
1955 | */ |
||
1956 | bo_gem->aub_offset = bufmgr_gem->aub_offset; |
||
1957 | bufmgr_gem->aub_offset += bo->size; |
||
1958 | /* XXX: Handle aperture overflow. */ |
||
1959 | assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024); |
||
1960 | } |
||
1961 | |||
1962 | static void |
||
1963 | aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype, |
||
1964 | uint32_t offset, uint32_t size) |
||
1965 | { |
||
1966 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
1967 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
1968 | |||
1969 | aub_out(bufmgr_gem, |
||
1970 | CMD_AUB_TRACE_HEADER_BLOCK | |
||
1971 | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2)); |
||
1972 | aub_out(bufmgr_gem, |
||
1973 | AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE); |
||
1974 | aub_out(bufmgr_gem, subtype); |
||
1975 | aub_out(bufmgr_gem, bo_gem->aub_offset + offset); |
||
1976 | aub_out(bufmgr_gem, size); |
||
1977 | if (bufmgr_gem->gen >= 8) |
||
1978 | aub_out(bufmgr_gem, 0); |
||
1979 | aub_write_bo_data(bo, offset, size); |
||
1980 | } |
||
1981 | |||
1982 | /** |
||
1983 | * Break up large objects into multiple writes. Otherwise a 128kb VBO |
||
1984 | * would overflow the 16 bits of size field in the packet header and |
||
1985 | * everything goes badly after that. |
||
1986 | */ |
||
1987 | static void |
||
1988 | aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype, |
||
1989 | uint32_t offset, uint32_t size) |
||
1990 | { |
||
1991 | uint32_t block_size; |
||
1992 | uint32_t sub_offset; |
||
1993 | |||
1994 | for (sub_offset = 0; sub_offset < size; sub_offset += block_size) { |
||
1995 | block_size = size - sub_offset; |
||
1996 | |||
1997 | if (block_size > 8 * 4096) |
||
1998 | block_size = 8 * 4096; |
||
1999 | |||
2000 | aub_write_trace_block(bo, type, subtype, offset + sub_offset, |
||
2001 | block_size); |
||
2002 | } |
||
2003 | } |
||
2004 | |||
2005 | static void |
||
2006 | aub_write_bo(drm_intel_bo *bo) |
||
2007 | { |
||
2008 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2009 | uint32_t offset = 0; |
||
2010 | unsigned i; |
||
2011 | |||
2012 | aub_bo_get_address(bo); |
||
2013 | |||
2014 | /* Write out each annotated section separately. */ |
||
2015 | for (i = 0; i < bo_gem->aub_annotation_count; ++i) { |
||
2016 | drm_intel_aub_annotation *annotation = |
||
2017 | &bo_gem->aub_annotations[i]; |
||
2018 | uint32_t ending_offset = annotation->ending_offset; |
||
2019 | if (ending_offset > bo->size) |
||
2020 | ending_offset = bo->size; |
||
2021 | if (ending_offset > offset) { |
||
2022 | aub_write_large_trace_block(bo, annotation->type, |
||
2023 | annotation->subtype, |
||
2024 | offset, |
||
2025 | ending_offset - offset); |
||
2026 | offset = ending_offset; |
||
2027 | } |
||
2028 | } |
||
2029 | |||
2030 | /* Write out any remaining unannotated data */ |
||
2031 | if (offset < bo->size) { |
||
2032 | aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0, |
||
2033 | offset, bo->size - offset); |
||
2034 | } |
||
2035 | } |
||
2036 | |||
2037 | /* |
||
2038 | * Make a ring buffer on the fly and dump it |
||
2039 | */ |
||
2040 | static void |
||
2041 | aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem, |
||
2042 | uint32_t batch_buffer, int ring_flag) |
||
2043 | { |
||
2044 | uint32_t ringbuffer[4096]; |
||
2045 | int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */ |
||
2046 | int ring_count = 0; |
||
2047 | |||
2048 | if (ring_flag == I915_EXEC_BSD) |
||
2049 | ring = AUB_TRACE_TYPE_RING_PRB1; |
||
2050 | else if (ring_flag == I915_EXEC_BLT) |
||
2051 | ring = AUB_TRACE_TYPE_RING_PRB2; |
||
2052 | |||
2053 | /* Make a ring buffer to execute our batchbuffer. */ |
||
2054 | memset(ringbuffer, 0, sizeof(ringbuffer)); |
||
2055 | if (bufmgr_gem->gen >= 8) { |
||
2056 | ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2); |
||
2057 | ringbuffer[ring_count++] = batch_buffer; |
||
2058 | ringbuffer[ring_count++] = 0; |
||
2059 | } else { |
||
2060 | ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START; |
||
2061 | ringbuffer[ring_count++] = batch_buffer; |
||
2062 | } |
||
2063 | |||
2064 | /* Write out the ring. This appears to trigger execution of |
||
2065 | * the ring in the simulator. |
||
2066 | */ |
||
2067 | aub_out(bufmgr_gem, |
||
2068 | CMD_AUB_TRACE_HEADER_BLOCK | |
||
2069 | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2)); |
||
2070 | aub_out(bufmgr_gem, |
||
2071 | AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE); |
||
2072 | aub_out(bufmgr_gem, 0); /* general/surface subtype */ |
||
2073 | aub_out(bufmgr_gem, bufmgr_gem->aub_offset); |
||
2074 | aub_out(bufmgr_gem, ring_count * 4); |
||
2075 | if (bufmgr_gem->gen >= 8) |
||
2076 | aub_out(bufmgr_gem, 0); |
||
2077 | |||
2078 | /* FIXME: Need some flush operations here? */ |
||
2079 | aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4); |
||
2080 | |||
2081 | /* Update offset pointer */ |
||
2082 | bufmgr_gem->aub_offset += 4096; |
||
2083 | } |
||
2084 | |||
2085 | void |
||
2086 | drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo, |
||
2087 | int x1, int y1, int width, int height, |
||
2088 | enum aub_dump_bmp_format format, |
||
2089 | int pitch, int offset) |
||
2090 | { |
||
2091 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
2092 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; |
||
2093 | uint32_t cpp; |
||
2094 | |||
2095 | switch (format) { |
||
2096 | case AUB_DUMP_BMP_FORMAT_8BIT: |
||
2097 | cpp = 1; |
||
2098 | break; |
||
2099 | case AUB_DUMP_BMP_FORMAT_ARGB_4444: |
||
2100 | cpp = 2; |
||
2101 | break; |
||
2102 | case AUB_DUMP_BMP_FORMAT_ARGB_0888: |
||
2103 | case AUB_DUMP_BMP_FORMAT_ARGB_8888: |
||
2104 | cpp = 4; |
||
2105 | break; |
||
2106 | default: |
||
2107 | printf("Unknown AUB dump format %d\n", format); |
||
2108 | return; |
||
2109 | } |
||
2110 | |||
2111 | if (!bufmgr_gem->aub_file) |
||
2112 | return; |
||
2113 | |||
2114 | aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4); |
||
2115 | aub_out(bufmgr_gem, (y1 << 16) | x1); |
||
2116 | aub_out(bufmgr_gem, |
||
2117 | (format << 24) | |
||
2118 | (cpp << 19) | |
||
2119 | pitch / 4); |
||
2120 | aub_out(bufmgr_gem, (height << 16) | width); |
||
2121 | aub_out(bufmgr_gem, bo_gem->aub_offset + offset); |
||
2122 | aub_out(bufmgr_gem, |
||
2123 | ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) | |
||
2124 | ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0)); |
||
2125 | } |
||
2126 | |||
2127 | static void |
||
2128 | aub_exec(drm_intel_bo *bo, int ring_flag, int used) |
||
2129 | { |
||
2130 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
2131 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2132 | int i; |
||
2133 | bool batch_buffer_needs_annotations; |
||
2134 | |||
2135 | if (!bufmgr_gem->aub_file) |
||
2136 | return; |
||
2137 | |||
2138 | /* If batch buffer is not annotated, annotate it the best we |
||
2139 | * can. |
||
2140 | */ |
||
2141 | batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0; |
||
2142 | if (batch_buffer_needs_annotations) { |
||
2143 | drm_intel_aub_annotation annotations[2] = { |
||
2144 | { AUB_TRACE_TYPE_BATCH, 0, used }, |
||
2145 | { AUB_TRACE_TYPE_NOTYPE, 0, bo->size } |
||
2146 | }; |
||
2147 | drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2); |
||
2148 | } |
||
2149 | |||
2150 | /* Write out all buffers to AUB memory */ |
||
2151 | for (i = 0; i < bufmgr_gem->exec_count; i++) { |
||
2152 | aub_write_bo(bufmgr_gem->exec_bos[i]); |
||
2153 | } |
||
2154 | |||
2155 | /* Remove any annotations we added */ |
||
2156 | if (batch_buffer_needs_annotations) |
||
2157 | drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0); |
||
2158 | |||
2159 | /* Dump ring buffer */ |
||
2160 | aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag); |
||
2161 | |||
2162 | fflush(bufmgr_gem->aub_file); |
||
2163 | |||
2164 | /* |
||
2165 | * One frame has been dumped. So reset the aub_offset for the next frame. |
||
2166 | * |
||
2167 | * FIXME: Can we do this? |
||
2168 | */ |
||
2169 | bufmgr_gem->aub_offset = 0x10000; |
||
2170 | } |
||
2171 | |||
2172 | |||
2173 | static int |
||
2174 | do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx, |
||
2175 | drm_clip_rect_t *cliprects, int num_cliprects, int DR4, |
||
2176 | unsigned int flags) |
||
2177 | { |
||
2178 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; |
||
2179 | struct drm_i915_gem_execbuffer2 execbuf; |
||
2180 | int ret = 0; |
||
2181 | int i; |
||
2182 | |||
2183 | switch (flags & 0x7) { |
||
2184 | default: |
||
2185 | return -EINVAL; |
||
2186 | case I915_EXEC_BLT: |
||
2187 | if (!bufmgr_gem->has_blt) |
||
2188 | return -EINVAL; |
||
2189 | break; |
||
2190 | case I915_EXEC_BSD: |
||
2191 | if (!bufmgr_gem->has_bsd) |
||
2192 | return -EINVAL; |
||
2193 | break; |
||
2194 | case I915_EXEC_VEBOX: |
||
2195 | if (!bufmgr_gem->has_vebox) |
||
2196 | return -EINVAL; |
||
2197 | break; |
||
2198 | case I915_EXEC_RENDER: |
||
2199 | case I915_EXEC_DEFAULT: |
||
2200 | break; |
||
2201 | } |
||
2202 | |||
2203 | // pthread_mutex_lock(&bufmgr_gem->lock); |
||
2204 | /* Update indices and set up the validate list. */ |
||
2205 | drm_intel_gem_bo_process_reloc2(bo); |
||
2206 | |||
2207 | /* Add the batch buffer to the validation list. There are no relocations |
||
2208 | * pointing to it. |
||
2209 | */ |
||
2210 | drm_intel_add_validate_buffer2(bo, 0); |
||
2211 | |||
2212 | VG_CLEAR(execbuf); |
||
2213 | execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects; |
||
2214 | execbuf.buffer_count = bufmgr_gem->exec_count; |
||
2215 | execbuf.batch_start_offset = 0; |
||
2216 | execbuf.batch_len = used; |
||
2217 | execbuf.cliprects_ptr = (uintptr_t)cliprects; |
||
2218 | execbuf.num_cliprects = num_cliprects; |
||
2219 | execbuf.DR1 = 0; |
||
2220 | execbuf.DR4 = DR4; |
||
2221 | execbuf.flags = flags; |
||
2222 | if (ctx == NULL) |
||
2223 | i915_execbuffer2_set_context_id(execbuf, 0); |
||
2224 | else |
||
2225 | i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id); |
||
2226 | execbuf.rsvd2 = 0; |
||
2227 | |||
2228 | aub_exec(bo, flags, used); |
||
2229 | |||
2230 | if (bufmgr_gem->no_exec) |
||
2231 | goto skip_execution; |
||
2232 | |||
2233 | ret = drmIoctl(bufmgr_gem->fd, |
||
2234 | DRM_IOCTL_I915_GEM_EXECBUFFER2, |
||
2235 | &execbuf); |
||
2236 | if (ret != 0) { |
||
2237 | ret = -errno; |
||
2238 | if (ret == -ENOSPC) { |
||
2239 | DBG("Execbuffer fails to pin. " |
||
2240 | "Estimate: %u. Actual: %u. Available: %u\n", |
||
2241 | drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos, |
||
2242 | bufmgr_gem->exec_count), |
||
2243 | drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos, |
||
2244 | bufmgr_gem->exec_count), |
||
2245 | (unsigned int) bufmgr_gem->gtt_size); |
||
2246 | } |
||
2247 | } |
||
2248 | drm_intel_update_buffer_offsets2(bufmgr_gem); |
||
2249 | |||
2250 | skip_execution: |
||
2251 | if (bufmgr_gem->bufmgr.debug) |
||
2252 | drm_intel_gem_dump_validation_list(bufmgr_gem); |
||
2253 | |||
2254 | for (i = 0; i < bufmgr_gem->exec_count; i++) { |
||
2255 | drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; |
||
2256 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; |
||
2257 | |||
5068 | serge | 2258 | bo_gem->idle = false; |
2259 | |||
4363 | Serge | 2260 | /* Disconnect the buffer from the validate list */ |
2261 | bo_gem->validate_index = -1; |
||
2262 | bufmgr_gem->exec_bos[i] = NULL; |
||
2263 | } |
||
2264 | bufmgr_gem->exec_count = 0; |
||
2265 | // pthread_mutex_unlock(&bufmgr_gem->lock); |
||
2266 | |||
2267 | return ret; |
||
2268 | } |
||
2269 | |||
2270 | static int |
||
2271 | drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used, |
||
2272 | drm_clip_rect_t *cliprects, int num_cliprects, |
||
2273 | int DR4) |
||
2274 | { |
||
2275 | return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4, |
||
2276 | I915_EXEC_RENDER); |
||
2277 | } |
||
2278 | |||
2279 | static int |
||
2280 | drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used, |
||
2281 | drm_clip_rect_t *cliprects, int num_cliprects, int DR4, |
||
2282 | unsigned int flags) |
||
2283 | { |
||
2284 | return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4, |
||
2285 | flags); |
||
2286 | } |
||
2287 | |||
2288 | int |
||
2289 | drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx, |
||
2290 | int used, unsigned int flags) |
||
2291 | { |
||
2292 | return do_exec2(bo, used, ctx, NULL, 0, 0, flags); |
||
2293 | } |
||
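/* Usage sketch: execute `used` bytes of a batch in a private hardware
 * context on the render ring. Creating a fresh context per submission
 * is only for illustration; real callers keep the context around.
 */
#if 0
static int example_exec_in_context(drm_intel_bufmgr *bufmgr,
				   drm_intel_bo *batch, int used)
{
	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
	int ret;

	if (ctx == NULL)
		return -ENOMEM;
	ret = drm_intel_gem_bo_context_exec(batch, ctx, used,
					    I915_EXEC_RENDER);
	drm_intel_gem_context_destroy(ctx);
	return ret;
}
#endif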
2294 | |||
2295 | static int |
||
2296 | drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment) |
||
2297 | { |
||
2298 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
2299 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2300 | struct drm_i915_gem_pin pin; |
||
2301 | int ret; |
||
2302 | |||
2303 | VG_CLEAR(pin); |
||
2304 | pin.handle = bo_gem->gem_handle; |
||
2305 | pin.alignment = alignment; |
||
2306 | |||
2307 | ret = drmIoctl(bufmgr_gem->fd, |
||
2308 | DRM_IOCTL_I915_GEM_PIN, |
||
2309 | &pin); |
||
2310 | if (ret != 0) |
||
2311 | return -errno; |
||
2312 | |||
5068 | serge | 2313 | bo->offset64 = pin.offset; |
4363 | Serge | 2314 | bo->offset = pin.offset; |
2315 | return 0; |
||
2316 | } |
||
2317 | |||
2318 | static int |
||
2319 | drm_intel_gem_bo_unpin(drm_intel_bo *bo) |
||
2320 | { |
||
2321 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
2322 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2323 | struct drm_i915_gem_unpin unpin; |
||
2324 | int ret; |
||
2325 | |||
2326 | VG_CLEAR(unpin); |
||
2327 | unpin.handle = bo_gem->gem_handle; |
||
2328 | |||
2329 | ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin); |
||
2330 | if (ret != 0) |
||
2331 | return -errno; |
||
2332 | |||
2333 | return 0; |
||
2334 | } |
||
2335 | |||
2336 | static int |
||
2337 | drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo, |
||
2338 | uint32_t tiling_mode, |
||
2339 | uint32_t stride) |
||
2340 | { |
||
2341 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
2342 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2343 | struct drm_i915_gem_set_tiling set_tiling; |
||
2344 | int ret; |
||
2345 | |||
2346 | if (bo_gem->global_name == 0 && |
||
2347 | tiling_mode == bo_gem->tiling_mode && |
||
2348 | stride == bo_gem->stride) |
||
2349 | return 0; |
||
2350 | |||
2351 | memset(&set_tiling, 0, sizeof(set_tiling)); |
||
2352 | // do { |
||
2353 | /* set_tiling is slightly broken and overwrites the |
||
2354 | * input on the error path, so we have to open code |
||
2355 | * drmIoctl. |
||
2356 | */ |
||
2357 | set_tiling.handle = bo_gem->gem_handle; |
||
2358 | set_tiling.tiling_mode = tiling_mode; |
||
2359 | set_tiling.stride = stride; |
||
2360 | |||
2361 | ret = drmIoctl(bufmgr_gem->fd, |
||
2362 | DRM_IOCTL_I915_GEM_SET_TILING, |
||
2363 | &set_tiling); |
||
2364 | // } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); |
||
2365 | if (ret == -1) |
||
2366 | return -errno; |
||
2367 | |||
2368 | bo_gem->tiling_mode = set_tiling.tiling_mode; |
||
2369 | bo_gem->swizzle_mode = set_tiling.swizzle_mode; |
||
2370 | bo_gem->stride = set_tiling.stride; |
||
2371 | return 0; |
||
2372 | } |
||
2373 | |||
2374 | static int |
||
2375 | drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, |
||
2376 | uint32_t stride) |
||
2377 | { |
||
2378 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
2379 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2380 | int ret; |
||
2381 | |||
2382 | /* Linear buffers have no stride. By ensuring that we only ever use |
||
2383 | * stride 0 with linear buffers, we simplify our code. |
||
2384 | */ |
||
2385 | if (*tiling_mode == I915_TILING_NONE) |
||
2386 | stride = 0; |
||
2387 | |||
2388 | ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride); |
||
2389 | if (ret == 0) |
||
2390 | drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); |
||
2391 | |||
2392 | *tiling_mode = bo_gem->tiling_mode; |
||
2393 | return ret; |
||
2394 | } |
||
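/* Usage sketch: request X tiling; the tiling argument is in/out, so
 * check what the kernel actually granted. The 512-byte stride is an
 * illustrative value, not a requirement of this file.
 */
#if 0
static bool example_make_x_tiled(drm_intel_bo *bo)
{
	uint32_t tiling = I915_TILING_X;

	if (drm_intel_bo_set_tiling(bo, &tiling, 512) != 0)
		return false;
	return tiling == I915_TILING_X;	/* kernel may have kept it linear */
}
#endif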
2395 | |||
2396 | static int |
||
2397 | drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, |
||
2398 | uint32_t * swizzle_mode) |
||
2399 | { |
||
2400 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2401 | |||
2402 | *tiling_mode = bo_gem->tiling_mode; |
||
2403 | *swizzle_mode = bo_gem->swizzle_mode; |
||
2404 | return 0; |
||
2405 | } |
||
2406 | |||
2407 | #if 0 |
||
2408 | drm_intel_bo * |
||
2409 | drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size) |
||
2410 | { |
||
2411 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; |
||
2412 | int ret; |
||
2413 | uint32_t handle; |
||
2414 | drm_intel_bo_gem *bo_gem; |
||
2415 | struct drm_i915_gem_get_tiling get_tiling; |
||
2416 | drmMMListHead *list; |
||
2417 | |||
2418 | ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle); |
||
2419 | |||
2420 | /* |
||
2421 | * See if the kernel has already returned this buffer to us. Just as |
||
2422 | * for named buffers, we must not create two bo's pointing at the same |
||
2423 | * kernel object |
||
2424 | */ |
||
2425 | for (list = bufmgr_gem->named.next; |
||
2426 | list != &bufmgr_gem->named; |
||
2427 | list = list->next) { |
||
2428 | bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list); |
||
2429 | if (bo_gem->gem_handle == handle) { |
||
2430 | drm_intel_gem_bo_reference(&bo_gem->bo); |
||
2431 | return &bo_gem->bo; |
||
2432 | } |
||
2433 | } |
||
2434 | |||
2435 | if (ret) { |
||
2436 | fprintf(stderr, "drmPrimeFDToHandle failed: ret %d, errno %d\n", ret, errno); |
||
2437 | return NULL; |
||
2438 | } |
||
2439 | |||
2440 | bo_gem = calloc(1, sizeof(*bo_gem)); |
||
2441 | if (!bo_gem) |
||
2442 | return NULL; |
||
2443 | |||
2444 | /* Determine size of bo. The fd-to-handle ioctl really should |
||
2445 | * return the size, but it doesn't. If we have kernel 3.12 or |
||
2446 | * later, we can lseek on the prime fd to get the size. Older |
||
2447 | * kernels will just fail, in which case we fall back to the |
||
2448 | * provided (estimated or guessed) size. */ |
||
2449 | ret = lseek(prime_fd, 0, SEEK_END); |
||
2450 | if (ret != -1) |
||
2451 | bo_gem->bo.size = ret; |
||
2452 | else |
||
2453 | bo_gem->bo.size = size; |
||
2454 | |||
2455 | bo_gem->bo.handle = handle; |
||
2456 | bo_gem->bo.bufmgr = bufmgr; |
||
2457 | |||
2458 | bo_gem->gem_handle = handle; |
||
2459 | |||
2460 | atomic_set(&bo_gem->refcount, 1); |
||
2461 | |||
2462 | bo_gem->name = "prime"; |
||
2463 | bo_gem->validate_index = -1; |
||
2464 | bo_gem->reloc_tree_fences = 0; |
||
2465 | bo_gem->used_as_reloc_target = false; |
||
2466 | bo_gem->has_error = false; |
||
2467 | bo_gem->reusable = false; |
||
2468 | |||
2469 | DRMINITLISTHEAD(&bo_gem->vma_list); |
||
2470 | DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named); |
||
2471 | |||
2472 | VG_CLEAR(get_tiling); |
||
2473 | get_tiling.handle = bo_gem->gem_handle; |
||
2474 | ret = drmIoctl(bufmgr_gem->fd, |
||
2475 | DRM_IOCTL_I915_GEM_GET_TILING, |
||
2476 | &get_tiling); |
||
2477 | if (ret != 0) { |
||
2478 | drm_intel_gem_bo_unreference(&bo_gem->bo); |
||
2479 | return NULL; |
||
2480 | } |
||
2481 | bo_gem->tiling_mode = get_tiling.tiling_mode; |
||
2482 | bo_gem->swizzle_mode = get_tiling.swizzle_mode; |
||
2483 | /* XXX stride is unknown */ |
||
2484 | drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); |
||
2485 | |||
2486 | return &bo_gem->bo; |
||
2487 | } |
||
2488 | |||
2489 | int |
||
2490 | drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd) |
||
2491 | { |
||
2492 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
2493 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2494 | |||
2495 | if (DRMLISTEMPTY(&bo_gem->name_list)) |
||
2496 | DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named); |
||
2497 | |||
2498 | if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle, |
||
2499 | DRM_CLOEXEC, prime_fd) != 0) |
||
2500 | return -errno; |
||
2501 | |||
2502 | bo_gem->reusable = false; |
||
2503 | |||
2504 | return 0; |
||
2505 | } |
||
2506 | #endif |
||
2507 | |||
2508 | static int |
||
2509 | drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name) |
||
2510 | { |
||
2511 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
||
2512 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2513 | int ret; |
||
2514 | |||
2515 | if (!bo_gem->global_name) { |
||
2516 | struct drm_gem_flink flink; |
||
2517 | |||
2518 | VG_CLEAR(flink); |
||
2519 | flink.handle = bo_gem->gem_handle; |
||
2520 | |||
2521 | ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink); |
||
2522 | if (ret != 0) |
||
2523 | return -errno; |
||
2524 | |||
2525 | bo_gem->global_name = flink.name; |
||
2526 | bo_gem->reusable = false; |
||
2527 | |||
5068 | serge | 2528 | if (DRMLISTEMPTY(&bo_gem->name_list)) |
2529 | DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named); |
||
4363 | Serge | 2530 | } |
2531 | |||
2532 | *name = bo_gem->global_name; |
||
2533 | return 0; |
||
2534 | } |
||
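/* Usage sketch: export a global name for cross-process sharing; the
 * peer process would open it with drm_intel_bo_gem_create_from_name().
 * As the code above shows, flinking also marks the bo non-reusable.
 */
#if 0
static int example_share_bo(drm_intel_bo *bo, uint32_t *name_out)
{
	return drm_intel_bo_flink(bo, name_out);
}
#endif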
2535 | |||
2536 | /** |
||
2537 | * Enables unlimited caching of buffer objects for reuse. |
||
2538 | * |
||
2539 | * This is potentially very memory expensive, as the cache at each bucket |
||
2540 | * size is only bounded by how many buffers of that size we've managed to have |
||
2541 | * in flight at once. |
||
2542 | */ |
||
2543 | void |
||
2544 | drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr) |
||
2545 | { |
||
2546 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; |
||
2547 | |||
2548 | bufmgr_gem->bo_reuse = true; |
||
2549 | } |
||
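/* Usage sketch: typical bring-up pairs bufmgr creation with enabling
 * reuse. The 4096-byte batch size is a common but illustrative choice.
 */
#if 0
static drm_intel_bufmgr *example_open_bufmgr(int fd)
{
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);

	if (bufmgr != NULL)
		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	return bufmgr;
}
#endif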
2550 | |||
2551 | /** |
||
2552 | * Enable use of fenced reloc type. |
||
2553 | * |
||
2554 | * New code should enable this to avoid unnecessary fence register |
||
2555 | * allocation. If this option is not enabled, all relocs will have a fence |
||
2556 | * register allocated. |
||
2557 | */ |
||
2558 | void |
||
2559 | drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr) |
||
2560 | { |
||
2561 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; |
||
2562 | |||
2563 | if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2) |
||
2564 | bufmgr_gem->fenced_relocs = true; |
||
2565 | } |
||
2566 | |||
2567 | /** |
||
2568 | * Return the additional aperture space required by the tree of buffer objects |
||
2569 | * rooted at bo. |
||
2570 | */ |
||
2571 | static int |
||
2572 | drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo) |
||
2573 | { |
||
2574 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2575 | int i; |
||
2576 | int total = 0; |
||
2577 | |||
2578 | if (bo == NULL || bo_gem->included_in_check_aperture) |
||
2579 | return 0; |
||
2580 | |||
2581 | total += bo->size; |
||
2582 | bo_gem->included_in_check_aperture = true; |
||
2583 | |||
2584 | for (i = 0; i < bo_gem->reloc_count; i++) |
||
2585 | total += |
||
2586 | drm_intel_gem_bo_get_aperture_space(bo_gem-> |
||
2587 | reloc_target_info[i].bo); |
||
2588 | |||
2589 | return total; |
||
2590 | } |
||
2591 | |||
2592 | /** |
||
2593 | * Count the number of buffers in this list that need a fence reg |
||
2594 | * |
||
2595 | * If the count is greater than the number of available regs, we'll have |
||
2596 | * to ask the caller to resubmit a batch with fewer tiled buffers. |
||
2597 | * |
||
2598 | * This function over-counts if the same buffer is used multiple times. |
||
2599 | */ |
||
2600 | static unsigned int |
||
2601 | drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count) |
||
2602 | { |
||
2603 | int i; |
||
2604 | unsigned int total = 0; |
||
2605 | |||
2606 | for (i = 0; i < count; i++) { |
||
2607 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i]; |
||
2608 | |||
2609 | if (bo_gem == NULL) |
||
2610 | continue; |
||
2611 | |||
2612 | total += bo_gem->reloc_tree_fences; |
||
2613 | } |
||
2614 | return total; |
||
2615 | } |
||
2616 | |||
2617 | /** |
||
2618 | * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready |
||
2619 | * for the next drm_intel_bufmgr_check_aperture_space() call. |
||
2620 | */ |
||
2621 | static void |
||
2622 | drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo) |
||
2623 | { |
||
2624 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2625 | int i; |
||
2626 | |||
2627 | if (bo == NULL || !bo_gem->included_in_check_aperture) |
||
2628 | return; |
||
2629 | |||
2630 | bo_gem->included_in_check_aperture = false; |
||
2631 | |||
2632 | for (i = 0; i < bo_gem->reloc_count; i++) |
||
2633 | drm_intel_gem_bo_clear_aperture_space_flag(bo_gem-> |
||
2634 | reloc_target_info[i].bo); |
||
2635 | } |
||
2636 | |||
2637 | /** |
||
2638 | * Return a conservative estimate for the amount of aperture required |
||
2639 | * for a collection of buffers. This may double-count some buffers. |
||
2640 | */ |
||
2641 | static unsigned int |
||
2642 | drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count) |
||
2643 | { |
||
2644 | int i; |
||
2645 | unsigned int total = 0; |
||
2646 | |||
2647 | for (i = 0; i < count; i++) { |
||
2648 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i]; |
||
2649 | if (bo_gem != NULL) |
||
2650 | total += bo_gem->reloc_tree_size; |
||
2651 | } |
||
2652 | return total; |
||
2653 | } |
||
2654 | |||
2655 | /** |
||
2656 | * Return the amount of aperture needed for a collection of buffers. |
||
2657 | * This avoids double counting any buffers, at the cost of looking |
||
2658 | * at every buffer in the set. |
||
2659 | */ |
||
2660 | static unsigned int |
||
2661 | drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count) |
||
2662 | { |
||
2663 | int i; |
||
2664 | unsigned int total = 0; |
||
2665 | |||
2666 | for (i = 0; i < count; i++) { |
||
2667 | total += drm_intel_gem_bo_get_aperture_space(bo_array[i]); |
||
2668 | /* For the first buffer object in the array, we get an |
||
2669 | * accurate count back for its reloc_tree size (since nothing |
||
2670 | * had been flagged as being counted yet). We can save that |
||
2671 | * value out as a more conservative reloc_tree_size that |
||
2672 | * avoids double-counting target buffers. Since the first |
||
2673 | * buffer happens to usually be the batch buffer in our |
||
2674 | * callers, this can pull us back from doing the tree |
||
2675 | * walk on every new batch emit. |
||
2676 | */ |
||
2677 | if (i == 0) { |
||
2678 | drm_intel_bo_gem *bo_gem = |
||
2679 | (drm_intel_bo_gem *) bo_array[i]; |
||
2680 | bo_gem->reloc_tree_size = total; |
||
2681 | } |
||
2682 | } |
||
2683 | |||
2684 | for (i = 0; i < count; i++) |
||
2685 | drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]); |
||
2686 | return total; |
||
2687 | } |
||
2688 | |||
2689 | /** |
||
2690 | * Return -1 if the batchbuffer should be flushed before attempting to |
||
2691 | * emit rendering referencing the buffers pointed to by bo_array. |
||
2692 | * |
||
2693 | * This is required because if we try to emit a batchbuffer with relocations |
||
2694 | * to a tree of buffers that won't simultaneously fit in the aperture, |
||
2695 | * the rendering will return an error at a point where the software is not |
||
2696 | * prepared to recover from it. |
||
2697 | * |
||
2698 | * However, we also want to emit the batchbuffer significantly before we reach |
||
2699 | * the limit, as a series of batchbuffers each of which references buffers |
||
2700 | * covering almost all of the aperture means that at each emit we end up |
||
2701 | * waiting to evict a buffer from the last rendering, and we get synchronous |
||
2702 | * performance. By emitting smaller batchbuffers, we eat some CPU overhead to |
||
2703 | * get better parallelism. |
||
2704 | */ |
||
2705 | static int |
||
2706 | drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count) |
||
2707 | { |
||
2708 | drm_intel_bufmgr_gem *bufmgr_gem = |
||
2709 | (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr; |
||
2710 | unsigned int total = 0; |
||
2711 | unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4; |
||
2712 | int total_fences; |
||
2713 | |||
2714 | /* Check for fence reg constraints if necessary */ |
||
2715 | if (bufmgr_gem->available_fences) { |
||
2716 | total_fences = drm_intel_gem_total_fences(bo_array, count); |
||
2717 | if (total_fences > bufmgr_gem->available_fences) |
||
2718 | return -ENOSPC; |
||
2719 | } |
||
2720 | |||
2721 | total = drm_intel_gem_estimate_batch_space(bo_array, count); |
||
2722 | |||
2723 | if (total > threshold) |
||
2724 | total = drm_intel_gem_compute_batch_space(bo_array, count); |
||
2725 | |||
2726 | if (total > threshold) { |
||
2727 | DBG("check_space: overflowed available aperture, " |
||
2728 | "%dkb vs %dkb\n", |
||
2729 | total / 1024, (int)bufmgr_gem->gtt_size / 1024); |
||
2730 | return -ENOSPC; |
||
2731 | } else { |
||
2732 | DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024, |
||
2733 | (int)bufmgr_gem->gtt_size / 1024); |
||
2734 | return 0; |
||
2735 | } |
||
2736 | } |
||
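/* Usage sketch: check the whole working set after adding a buffer and
 * flush when it no longer fits. `flush_batch()` is a hypothetical
 * callback; the aperture check is the real public API.
 */
#if 0
static int example_add_bo(drm_intel_bo **batch_bos, int count,
			  drm_intel_bo *new_bo)
{
	batch_bos[count++] = new_bo;
	if (drm_intel_bufmgr_check_aperture_space(batch_bos, count) != 0)
		flush_batch();	/* resubmit with a smaller working set */
	return count;
}
#endif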
2737 | |||
2738 | /* |
||
2739 | * Disable buffer reuse for objects which are shared with the kernel |
||
2740 | * as scanout buffers |
||
2741 | */ |
||
2742 | static int |
||
2743 | drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo) |
||
2744 | { |
||
2745 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2746 | |||
2747 | bo_gem->reusable = false; |
||
2748 | return 0; |
||
2749 | } |
||
2750 | |||
2751 | static int |
||
2752 | drm_intel_gem_bo_is_reusable(drm_intel_bo *bo) |
||
2753 | { |
||
2754 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2755 | |||
2756 | return bo_gem->reusable; |
||
2757 | } |
||
2758 | |||
2759 | static int |
||
2760 | _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) |
||
2761 | { |
||
2762 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2763 | int i; |
||
2764 | |||
2765 | for (i = 0; i < bo_gem->reloc_count; i++) { |
||
2766 | if (bo_gem->reloc_target_info[i].bo == target_bo) |
||
2767 | return 1; |
||
2768 | if (bo == bo_gem->reloc_target_info[i].bo) |
||
2769 | continue; |
||
2770 | if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo, |
||
2771 | target_bo)) |
||
2772 | return 1; |
||
2773 | } |
||
2774 | |||
2775 | return 0; |
||
2776 | } |
||
2777 | |||
2778 | /** Return true if target_bo is referenced by bo's relocation tree. */ |
||
2779 | static int |
||
2780 | drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) |
||
2781 | { |
||
2782 | drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; |
||
2783 | |||
2784 | if (bo == NULL || target_bo == NULL) |
||
2785 | return 0; |
||
2786 | if (target_bo_gem->used_as_reloc_target) |
||
2787 | return _drm_intel_gem_bo_references(bo, target_bo); |
||
2788 | return 0; |
||
2789 | } |
||
2790 | |||
2791 | static void |
||
2792 | add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size) |
||
2793 | { |
||
2794 | unsigned int i = bufmgr_gem->num_buckets; |
||
2795 | |||
2796 | assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket)); |
||
2797 | |||
2798 | DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head); |
||
2799 | bufmgr_gem->cache_bucket[i].size = size; |
||
2800 | bufmgr_gem->num_buckets++; |
||
2801 | } |
||
2802 | |||
2803 | static void |
||
2804 | init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem) |
||
2805 | { |
||
2806 | unsigned long size, cache_max_size = 64 * 1024 * 1024; |
||
2807 | |||
2808 | /* OK, so power of two buckets was too wasteful of memory. |
||
2809 | * Give 3 other sizes between each power of two, to hopefully |
||
2810 | * cover things accurately enough. (The alternative is |
||
2811 | * probably to just go for exact matching of sizes, and assume |
||
2812 | * that for things like composited window resize the tiled |
||
2813 | * width/height alignment and rounding of sizes to pages will |
||
2814 | * get us useful cache hit rates anyway) |
||
2815 | */ |
||
2816 | add_bucket(bufmgr_gem, 4096); |
||
2817 | add_bucket(bufmgr_gem, 4096 * 2); |
||
2818 | add_bucket(bufmgr_gem, 4096 * 3); |
||
2819 | |||
2820 | /* Initialize the linked lists for BO reuse cache. */ |
||
2821 | for (size = 4 * 4096; size <= cache_max_size; size *= 2) { |
||
2822 | add_bucket(bufmgr_gem, size); |
||
2823 | |||
2824 | add_bucket(bufmgr_gem, size + size * 1 / 4); |
||
2825 | add_bucket(bufmgr_gem, size + size * 2 / 4); |
||
2826 | add_bucket(bufmgr_gem, size + size * 3 / 4); |
||
2827 | } |
||
2828 | } |
||
2829 | |||
2830 | void |
||
2831 | drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit) |
||
2832 | { |
||
2833 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; |
||
2834 | |||
2835 | bufmgr_gem->vma_max = limit; |
||
2836 | |||
2837 | drm_intel_gem_bo_purge_vma_cache(bufmgr_gem); |
||
2838 | } |
||
2839 | |||
2840 | /** |
||
2841 | * Get the PCI ID for the device. This can be overridden by setting the |
||
2842 | * INTEL_DEVID_OVERRIDE environment variable to the desired ID. |
||
2843 | */ |
||
2844 | static int |
||
2845 | get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem) |
||
2846 | { |
||
2847 | char *devid_override; |
||
2848 | int devid; |
||
2849 | int ret; |
||
2850 | drm_i915_getparam_t gp; |
||
2851 | |||
2852 | VG_CLEAR(devid); |
||
2853 | VG_CLEAR(gp); |
||
2854 | gp.param = I915_PARAM_CHIPSET_ID; |
||
2855 | gp.value = &devid; |
||
2856 | ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); |
||
2857 | if (ret) { |
||
2858 | fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno); |
||
2859 | fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value); |
||
2860 | } |
||
2861 | return devid; |
||
2862 | } |
||
2863 | |||
2864 | int |
||
2865 | drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr) |
||
2866 | { |
||
2867 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; |
||
2868 | |||
2869 | return bufmgr_gem->pci_device; |
||
2870 | } |
||
2871 | |||
2872 | /** |
||
2873 | * Sets up AUB dumping. |
||
2874 | * |
||
2875 | * This is a trace file format that can be used with the simulator. |
||
2876 | * Packets are emitted in a format somewhat like GPU command packets. |
||
2877 | * You can set up a GTT and upload your objects into the referenced |
||
2878 | * space, then send off batchbuffers and get BMPs out the other end. |
||
2879 | */ |
||
2880 | void |
||
2881 | drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable) |
||
2882 | { |
||
2883 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; |
||
2884 | int entry = 0x200003; |
||
2885 | int i; |
||
2886 | int gtt_size = 0x10000; |
||
2887 | const char *filename; |
||
2888 | |||
2889 | if (!enable) { |
||
2890 | if (bufmgr_gem->aub_file) { |
||
2891 | fclose(bufmgr_gem->aub_file); |
||
2892 | bufmgr_gem->aub_file = NULL; |
||
2893 | } |
||
2894 | return; |
||
2895 | } |
||
2896 | |||
2897 | bufmgr_gem->aub_file = fopen("intel.aub", "w+"); |
||
2898 | if (!bufmgr_gem->aub_file) |
||
2899 | return; |
||
2900 | |||
2901 | /* Start allocating objects from just after the GTT. */ |
||
2902 | bufmgr_gem->aub_offset = gtt_size; |
||
2903 | |||
2904 | /* Start with a (required) version packet. */ |
||
2905 | aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2)); |
||
2906 | aub_out(bufmgr_gem, |
||
2907 | (4 << AUB_HEADER_MAJOR_SHIFT) | |
||
2908 | (0 << AUB_HEADER_MINOR_SHIFT)); |
||
2909 | for (i = 0; i < 8; i++) { |
||
2910 | aub_out(bufmgr_gem, 0); /* app name */ |
||
2911 | } |
||
2912 | aub_out(bufmgr_gem, 0); /* timestamp */ |
||
2913 | aub_out(bufmgr_gem, 0); /* timestamp */ |
||
2914 | aub_out(bufmgr_gem, 0); /* comment len */ |
||
2915 | |||
2916 | /* Set up the GTT. The max we can handle is 256M */ |
||
5068 | serge | 2917 | aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2)); |
4363 | Serge | 2918 | aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE); |
2919 | aub_out(bufmgr_gem, 0); /* subtype */ |
||
2920 | aub_out(bufmgr_gem, 0); /* offset */ |
||
2921 | aub_out(bufmgr_gem, gtt_size); /* size */ |
||
2922 | for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) { |
||
2923 | aub_out(bufmgr_gem, entry); |
||
2924 | } |
||
2925 | } |
||
2926 | |||
2927 | drm_intel_context * |
||
2928 | drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr) |
||
2929 | { |
||
2930 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; |
||
2931 | struct drm_i915_gem_context_create create; |
||
2932 | drm_intel_context *context = NULL; |
||
2933 | int ret; |
||
2934 | |||
5068 | serge | 2935 | context = calloc(1, sizeof(*context)); |
2936 | if (!context) |
||
2937 | return NULL; |
||
2938 | |||
4363 | Serge | 2939 | VG_CLEAR(create); |
2940 | ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create); |
||
2941 | if (ret != 0) { |
||
2942 | DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", |
||
2943 | strerror(errno)); |
||
5068 | serge | 2944 | free(context); |
4363 | Serge | 2945 | return NULL; |
2946 | } |
||
2947 | |||
2948 | context->ctx_id = create.ctx_id; |
||
2949 | context->bufmgr = bufmgr; |
||
2950 | |||
2951 | return context; |
||
2952 | } |
||
2953 | |||
2954 | void |
||
2955 | drm_intel_gem_context_destroy(drm_intel_context *ctx) |
||
2956 | { |
||
2957 | drm_intel_bufmgr_gem *bufmgr_gem; |
||
2958 | struct drm_i915_gem_context_destroy destroy; |
||
2959 | int ret; |
||
2960 | |||
2961 | if (ctx == NULL) |
||
2962 | return; |
||
2963 | |||
2964 | VG_CLEAR(destroy); |
||
2965 | |||
2966 | bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr; |
||
2967 | destroy.ctx_id = ctx->ctx_id; |
||
2968 | ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, |
||
2969 | &destroy); |
||
2970 | if (ret != 0) |
||
2971 | fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n", |
||
2972 | strerror(errno)); |
||
2973 | |||
2974 | free(ctx); |
||
2975 | } |
||
2976 | |||
2977 | int |
||
2978 | drm_intel_reg_read(drm_intel_bufmgr *bufmgr, |
||
2979 | uint32_t offset, |
||
2980 | uint64_t *result) |
||
2981 | { |
||
2982 | drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; |
||
2983 | struct drm_i915_reg_read reg_read; |
||
2984 | int ret; |
||
2985 | |||
2986 | VG_CLEAR(reg_read); |
||
2987 | reg_read.offset = offset; |
||
2988 | |||
2989 | ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, ®_read); |
||
2990 | |||
2991 | *result = reg_read.val; |
||
2992 | return ret; |
||
2993 | } |
||
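/* Usage sketch: read a 64-bit register. 0x2358 is the render ring
 * TIMESTAMP offset on several generations; treat it as an example
 * value rather than something this file guarantees.
 */
#if 0
static uint64_t example_read_timestamp(drm_intel_bufmgr *bufmgr)
{
	uint64_t val = 0;

	if (drm_intel_reg_read(bufmgr, 0x2358, &val) != 0)
		return 0;	/* kernel rejected the offset */
	return val;
}
#endif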
2994 | |||
2995 | |||
2996 | /** |
||
2997 | * Annotate the given bo for use in aub dumping. |
||
2998 | * |
||
2999 | * \param annotations is an array of drm_intel_aub_annotation objects |
||
3000 | * describing the type of data in various sections of the bo. Each |
||
3001 | * element of the array specifies the type and subtype of a section of |
||
3002 | * the bo, and the past-the-end offset of that section. The elements |
||
3003 | * of \c annotations must be sorted so that ending_offset is |
||
3004 | * increasing. |
||
3005 | * |
||
3006 | * \param count is the number of elements in the \c annotations array. |
||
3007 | * If \c count is zero, then \c annotations will not be dereferenced. |
||
3008 | * |
||
3009 | * Annotations are copied into a private data structure, so caller may |
||
3010 | * re-use the memory pointed to by \c annotations after the call |
||
3011 | * returns. |
||
3012 | * |
||
3013 | * Annotations are stored for the lifetime of the bo; to reset to the |
||
3014 | * default state (no annotations), call this function with a \c count |
||
3015 | * of zero. |
||
3016 | */ |
||
3017 | void |
||
3018 | drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo, |
||
3019 | drm_intel_aub_annotation *annotations, |
||
3020 | unsigned count) |
||
3021 | { |
||
3022 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
3023 | unsigned size = sizeof(*annotations) * count; |
||
3024 | drm_intel_aub_annotation *new_annotations = |
||
3025 | count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL; |
||
3026 | if (new_annotations == NULL) { |
||
3027 | free(bo_gem->aub_annotations); |
||
3028 | bo_gem->aub_annotations = NULL; |
||
3029 | bo_gem->aub_annotation_count = 0; |
||
3030 | return; |
||
3031 | } |
||
3032 | memcpy(new_annotations, annotations, size); |
||
3033 | bo_gem->aub_annotations = new_annotations; |
||
3034 | bo_gem->aub_annotation_count = count; |
||
3035 | } |
||
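/* Usage sketch: annotate the first `used` bytes of a batch as command
 * data before an AUB dump, mirroring what aub_exec() above does for
 * unannotated batch buffers.
 */
#if 0
static void example_annotate_batch(drm_intel_bo *batch, int used)
{
	drm_intel_aub_annotation notes[2] = {
		{ AUB_TRACE_TYPE_BATCH, 0, used },
		{ AUB_TRACE_TYPE_NOTYPE, 0, batch->size }
	};

	drm_intel_bufmgr_gem_set_aub_annotations(batch, notes, 2);
}
#endif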
3036 | |||
3037 | /** |
||
3038 | * Initializes the GEM buffer manager, which uses the kernel to allocate, map, |
||
3039 | * and manage buffer objects. |
||
3040 | * |
||
3041 | * \param fd File descriptor of the opened DRM device. |
||
3042 | */ |
||
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		return NULL;

	bufmgr_gem->fd = fd;

//	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
//		free(bufmgr_gem);
//		return NULL;
//	}

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		printf("DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
		       strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		printf("Assuming %dkB available aperture size.\n"
		       "May lead to reduced performance or incorrect "
		       "rendering.\n",
		       (int)bufmgr_gem->gtt_size / 1024);
	}

	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else {
		free(bufmgr_gem);
		return NULL;
	}

//	printf("gen %d\n", bufmgr_gem->gen);

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
		 * be used for tiled blits. To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
		 * gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}

	VG_CLEAR(gp);
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_wait_timeout = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* The kernel does not support the HAS_LLC query; fall back
		 * to GPU generation detection and assume that we have LLC
		 * on GEN6/7.
		 */
		bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
				       IS_GEN7(bufmgr_gem->pci_device));
	} else
		bufmgr_gem->has_llc = *gp.value;

	gp.param = I915_PARAM_HAS_VEBOX;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);

	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers. Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}

	/* Let's go with one relocation for every two dwords (but round down
	 * a bit, since a power of two would mean an extra page allocation
	 * for the reloc buffer).
	 *
	 * One per every four dwords was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
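	/* Worked example (illustrative): for a typical 4096-byte batch,
	 * 4096 / sizeof(uint32_t) = 1024 dwords, 1024 / 2 = 512, minus 2
	 * gives 510 relocations, keeping the reloc buffer just under a
	 * power-of-two page boundary.
	 */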

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
//	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the execbuf2 entry points; this port assumes EXECBUF2 support */
//	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
//	} else
//		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
//	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
//	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	DRMINITLISTHEAD(&bufmgr_gem->named);
	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	return &bufmgr_gem->bufmgr;
}

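/* Illustrative sketch (not compiled in): typical bring-up of the GEM bufmgr.
 * fd is a placeholder: on this port the DRM fd comes from the platform layer
 * rather than open("/dev/dri/..."); 4096 is a common batch_size choice.
 */
#if 0
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (bufmgr == NULL)
		return NULL;	/* unsupported gen or ioctl failure */
	drm_intel_bo *bo =
		drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
#endif
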


drm_intel_bo *
bo_create_from_gem_handle(drm_intel_bufmgr *bufmgr,
			  unsigned int size, unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	/* At the moment most applications only have a few named bos.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->gem_handle == handle) {
			return &bo_gem->bo;
		}
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	bo_gem->bo.size = size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = NULL;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = handle;
	bo_gem->bo.handle = handle;
	bo_gem->global_name = 0;
	bo_gem->reusable = false;

	VG_CLEAR(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	printf("bo_create_from_handle: %d\n", handle);

	return &bo_gem->bo;
}
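
/* Illustrative sketch (not compiled in): wrapping a GEM handle received from
 * another component (e.g. the display server) in a bo via the helper above.
 * fb_size and fb_handle are placeholders for values obtained elsewhere.
 */
#if 0
	drm_intel_bo *scanout =
		bo_create_from_gem_handle(bufmgr, fb_size, fb_handle);
	if (scanout != NULL) {
		/* use scanout as an ordinary bo; it is marked non-reusable,
		 * so it will not be returned to the allocation cache */
	}
#endif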