/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
//#include <pthread.h>
#include <time.h>
#include <stdint.h>
#include <stdbool.h>

#include "errno.h"
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "string.h"

#include "i915_drm.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define memclear(s) memset(&s, 0, sizeof(s))
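
/*
 * memclear() zeroes an ioctl argument struct (padding included) before it
 * is handed to the kernel, so no stale stack bytes leak across the
 * interface.  The usual pattern throughout this file is:
 *
 *        struct drm_i915_gem_busy busy;
 *        memclear(busy);
 *        busy.handle = bo_gem->gem_handle;
 */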

#if 0
#define DBG(...) do {                                   \
        if (bufmgr_gem->bufmgr.debug)                   \
                fprintf(stderr, __VA_ARGS__);           \
} while (0)
#else
#define DBG(...)
#endif

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define MAX2(A, B) ((A) > (B) ? (A) : (B))

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.
 */
#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((__u32)(n))

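/*
 * Worked example: for a 64-bit offset n = 0x123456789ULL,
 * upper_32_bits(n) == 0x1 and lower_32_bits(n) == 0x23456789.
 * The double 16-bit shift keeps upper_32_bits() warning-free even when
 * n is only 32 bits wide, in which case it simply evaluates to 0.
 */
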
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
        drmMMListHead head;
        unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
        drm_intel_bufmgr bufmgr;

        atomic_t refcount;

        int fd;

        int max_relocs;

//      pthread_mutex_t lock;

        struct drm_i915_gem_exec_object *exec_objects;
        struct drm_i915_gem_exec_object2 *exec2_objects;
        drm_intel_bo **exec_bos;
        int exec_size;
        int exec_count;

        /** Array of lists of cached gem objects of power-of-two sizes */
        struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
        int num_buckets;
        time_t time;

        drmMMListHead managers;

        drmMMListHead named;
        drmMMListHead vma_cache;
        int vma_count, vma_open, vma_max;

        uint64_t gtt_size;
        int available_fences;
        int pci_device;
        int gen;
        unsigned int has_bsd : 1;
        unsigned int has_blt : 1;
        unsigned int has_relaxed_fencing : 1;
        unsigned int has_llc : 1;
        unsigned int has_wait_timeout : 1;
        unsigned int bo_reuse : 1;
        unsigned int no_exec : 1;
        unsigned int has_vebox : 1;
        bool fenced_relocs;

        struct {
                void *ptr;
                uint32_t handle;
        } userptr_active;

} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
        drm_intel_bo *bo;
        int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
        drm_intel_bo bo;

        atomic_t refcount;
        uint32_t gem_handle;
        const char *name;

        /**
         * Kernel-assigned global name for this object
         *
         * List contains both flink named and prime fd'd objects
         */
        unsigned int global_name;
        drmMMListHead name_list;

        /**
         * Index of the buffer within the validation list while preparing a
         * batchbuffer execution.
         */
        int validate_index;

        /**
         * Current tiling mode
         */
        uint32_t tiling_mode;
        uint32_t swizzle_mode;
        unsigned long stride;

        time_t free_time;

        /** Array passed to the DRM containing relocation information. */
        struct drm_i915_gem_relocation_entry *relocs;
        /**
         * Array of info structs corresponding to relocs[i].target_handle etc
         */
        drm_intel_reloc_target *reloc_target_info;
        /** Number of entries in relocs */
        int reloc_count;
        /** Array of BOs that are referenced by this buffer and will be softpinned */
        drm_intel_bo **softpin_target;
        /** Number of softpinned BOs that are referenced by this buffer */
        int softpin_target_count;
        /** Maximum number of softpinned BOs that are referenced by this buffer */
        int softpin_target_size;

        /** Mapped address for the buffer, saved across map/unmap cycles */
        void *mem_virtual;
        /** GTT virtual address for the buffer, saved across map/unmap cycles */
        void *gtt_virtual;
        /**
         * Virtual address of the buffer allocated by user, used for userptr
         * objects only.
         */
        void *user_virtual;
        int map_count;
        drmMMListHead vma_list;

        /** BO cache list */
        drmMMListHead head;

        /**
         * Boolean of whether this BO and its children have been included in
         * the current drm_intel_bufmgr_check_aperture_space() total.
         */
        bool included_in_check_aperture;

        /**
         * Boolean of whether this buffer has been used as a relocation
         * target and had its size accounted for, and thus can't have any
         * further relocations added to it.
         */
        bool used_as_reloc_target;

        /**
         * Boolean of whether we have encountered an error whilst building
         * the relocation tree.
         */
        bool has_error;

        /**
         * Boolean of whether this buffer can be re-used
         */
        bool reusable;

        /**
         * Boolean of whether the GPU is definitely not accessing the buffer.
         *
         * This is only valid when reusable, since non-reusable
         * buffers are those that have been shared with other
         * processes, so we don't know their state.
         */
        bool idle;

        /**
         * Boolean of whether this buffer was allocated with userptr
         */
        bool is_userptr;

        /**
         * Boolean of whether this buffer can be placed in the full 48-bit
         * address range on gen8+.
         *
         * By default, buffers will be kept in a 32-bit range, unless this
         * flag is explicitly set.
         */
        bool use_48b_address_range;

        /**
         * Whether this buffer is softpinned at the offset specified by the user
         */
        bool is_softpin;

        /**
         * Size in bytes of this buffer and its relocation descendants.
         *
         * Used to avoid costly tree walking in
         * drm_intel_bufmgr_check_aperture in the common case.
         */
        int reloc_tree_size;

        /**
         * Number of potential fence registers required by this buffer and its
         * relocations.
         */
        int reloc_tree_fences;

        /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
        bool mapped_cpu_write;
};

static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t * swizzle_mode);

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
                                     uint32_t tiling_mode,
                                     uint32_t stride);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
                                                      time_t time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
{
        return (drm_intel_bo_gem *)bo;
}

static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
                           uint32_t *tiling_mode)
{
        unsigned long min_size, max_size;
        unsigned long i;

        if (*tiling_mode == I915_TILING_NONE)
                return size;

        /* 965+ just need multiples of page size for tiling */
        if (bufmgr_gem->gen >= 4)
                return ROUND_UP_TO(size, 4096);

        /* Older chips need powers of two, of at least 512k or 1M */
        if (bufmgr_gem->gen == 3) {
                min_size = 1024*1024;
                max_size = 128*1024*1024;
        } else {
                min_size = 512*1024;
                max_size = 64*1024*1024;
        }

        if (size > max_size) {
                *tiling_mode = I915_TILING_NONE;
                return size;
        }

        /* Do we need to allocate every page for the fence? */
        if (bufmgr_gem->has_relaxed_fencing)
                return ROUND_UP_TO(size, 4096);

        for (i = min_size; i < size; i <<= 1)
                ;

        return i;
}
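
/*
 * Worked example: a 1.5 MiB X-tiled request on gen3 without relaxed
 * fencing doubles i from the 1 MiB minimum to 2 MiB and returns that;
 * the same request on gen4+ is only rounded up to the next 4096-byte
 * page boundary.
 */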

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
                            unsigned long pitch, uint32_t *tiling_mode)
{
        unsigned long tile_width;
        unsigned long i;

        /* If untiled, then just align it so that we can do rendering
         * to it with the 3D engine.
         */
        if (*tiling_mode == I915_TILING_NONE)
                return ALIGN(pitch, 64);

        if (*tiling_mode == I915_TILING_X
            || (IS_915(bufmgr_gem->pci_device)
                && *tiling_mode == I915_TILING_Y))
                tile_width = 512;
        else
                tile_width = 128;

        /* 965 is flexible */
        if (bufmgr_gem->gen >= 4)
                return ROUND_UP_TO(pitch, tile_width);

        /* The older hardware has a maximum pitch of 8192 with tiled
         * surfaces, so fallback to untiled if it's too large.
         */
        if (pitch > 8192) {
                *tiling_mode = I915_TILING_NONE;
                return ALIGN(pitch, 64);
        }

        /* Pre-965 needs power of two tile width */
        for (i = tile_width; i < pitch; i <<= 1)
                ;

        return i;
}
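
/*
 * Worked example: an X-tiled pitch of 1000 bytes becomes 1024 either way:
 * gen4+ rounds up to the next multiple of the 512-byte tile width, and
 * pre-965 parts double 512 once to reach the next power of two.
 */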

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                 unsigned long size)
{
        int i;

        for (i = 0; i < bufmgr_gem->num_buckets; i++) {
                struct drm_intel_gem_bo_bucket *bucket =
                    &bufmgr_gem->cache_bucket[i];
                if (bucket->size >= size) {
                        return bucket;
                }
        }

        return NULL;
}
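
/*
 * Because the buckets are kept in ascending size order by the init code
 * (not shown in this section), the linear scan above returns the smallest
 * bucket whose size still fits the request, and NULL for anything bigger
 * than the largest bucket (those BOs bypass the cache entirely).
 */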

static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
        int i, j;

        for (i = 0; i < bufmgr_gem->exec_count; i++) {
                drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
                drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

                if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
                        DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
                            bo_gem->is_softpin ? "*" : "",
                            bo_gem->name);
                        continue;
                }

                for (j = 0; j < bo_gem->reloc_count; j++) {
                        drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
                        drm_intel_bo_gem *target_gem =
                            (drm_intel_bo_gem *) target_bo;

                        DBG("%2d: %d %s(%s)@0x%08x %08x -> "
                            "%d (%s)@0x%08x %08x + 0x%08x\n",
                            i,
                            bo_gem->gem_handle,
                            bo_gem->is_softpin ? "*" : "",
                            bo_gem->name,
                            upper_32_bits(bo_gem->relocs[j].offset),
                            lower_32_bits(bo_gem->relocs[j].offset),
                            target_gem->gem_handle,
                            target_gem->name,
                            upper_32_bits(target_bo->offset64),
                            lower_32_bits(target_bo->offset64),
                            bo_gem->relocs[j].delta);
                }

                for (j = 0; j < bo_gem->softpin_target_count; j++) {
                        drm_intel_bo *target_bo = bo_gem->softpin_target[j];
                        drm_intel_bo_gem *target_gem =
                            (drm_intel_bo_gem *) target_bo;
                        DBG("%2d: %d %s(%s) -> "
                            "%d *(%s)@0x%08x %08x\n",
                            i,
                            bo_gem->gem_handle,
                            bo_gem->is_softpin ? "*" : "",
                            bo_gem->name,
                            target_gem->gem_handle,
                            target_gem->name,
                            upper_32_bits(target_bo->offset64),
                            lower_32_bits(target_bo->offset64));
                }
        }
}

static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        atomic_inc(&bo_gem->refcount);
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int index;

        if (bo_gem->validate_index != -1)
                return;

        /* Extend the array of validation entries as necessary. */
        if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
                int new_size = bufmgr_gem->exec_size * 2;

                if (new_size == 0)
                        new_size = 5;

                bufmgr_gem->exec_objects =
                    realloc(bufmgr_gem->exec_objects,
                            sizeof(*bufmgr_gem->exec_objects) * new_size);
                bufmgr_gem->exec_bos =
                    realloc(bufmgr_gem->exec_bos,
                            sizeof(*bufmgr_gem->exec_bos) * new_size);
                bufmgr_gem->exec_size = new_size;
        }

        index = bufmgr_gem->exec_count;
        bo_gem->validate_index = index;
        /* Fill in array entry */
        bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
        bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
        bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
        bufmgr_gem->exec_objects[index].alignment = bo->align;
        bufmgr_gem->exec_objects[index].offset = 0;
        bufmgr_gem->exec_bos[index] = bo;
        bufmgr_gem->exec_count++;
}

static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
        int index;
        int flags = 0;

        if (need_fence)
                flags |= EXEC_OBJECT_NEEDS_FENCE;
        if (bo_gem->use_48b_address_range)
                flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
        if (bo_gem->is_softpin)
                flags |= EXEC_OBJECT_PINNED;

        if (bo_gem->validate_index != -1) {
                bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
                return;
        }

        /* Extend the array of validation entries as necessary. */
        if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
                int new_size = bufmgr_gem->exec_size * 2;

                if (new_size == 0)
                        new_size = 5;

                bufmgr_gem->exec2_objects =
                        realloc(bufmgr_gem->exec2_objects,
                                sizeof(*bufmgr_gem->exec2_objects) * new_size);
                bufmgr_gem->exec_bos =
                        realloc(bufmgr_gem->exec_bos,
                                sizeof(*bufmgr_gem->exec_bos) * new_size);
                bufmgr_gem->exec_size = new_size;
        }

        index = bufmgr_gem->exec_count;
        bo_gem->validate_index = index;
        /* Fill in array entry */
        bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
        bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
        bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
        bufmgr_gem->exec2_objects[index].alignment = bo->align;
        bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ?
                bo->offset64 : 0;
        bufmgr_gem->exec_bos[index] = bo;
        bufmgr_gem->exec2_objects[index].flags = flags;
        bufmgr_gem->exec2_objects[index].rsvd1 = 0;
        bufmgr_gem->exec2_objects[index].rsvd2 = 0;
        bufmgr_gem->exec_count++;
}
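
/*
 * Note on the offset handling above: for a softpinned BO the
 * EXEC_OBJECT_PINNED flag turns exec2_objects[index].offset into an
 * input, telling the kernel to place the object exactly at the
 * user-chosen bo->offset64; for everything else the offset is seeded
 * with 0 and written back by the kernel after placement.
 */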

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
        sizeof(uint32_t))

static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                      drm_intel_bo_gem *bo_gem,
                                      unsigned int alignment)
{
        unsigned int size;

        assert(!bo_gem->used_as_reloc_target);

        /* The older chipsets are far less flexible in terms of tiling,
         * and require tiled buffers to be size-aligned in the aperture.
         * This means that in the worst possible case we will need a hole
         * twice as large as the object in order for it to fit into the
         * aperture. Optimal packing is for wimps.
         */
        size = bo_gem->bo.size;
        if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
                unsigned int min_size;

                if (bufmgr_gem->has_relaxed_fencing) {
                        if (bufmgr_gem->gen == 3)
                                min_size = 1024*1024;
                        else
                                min_size = 512*1024;

                        while (min_size < size)
                                min_size *= 2;
                } else
                        min_size = size;

                /* Account for worst-case alignment. */
                alignment = MAX2(alignment, min_size);
        }

        bo_gem->reloc_tree_size = size + alignment;
}

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        unsigned int max_relocs = bufmgr_gem->max_relocs;

        if (bo->size / 4 < max_relocs)
                max_relocs = bo->size / 4;

        bo_gem->relocs = malloc(max_relocs *
                                sizeof(struct drm_i915_gem_relocation_entry));
        bo_gem->reloc_target_info = malloc(max_relocs *
                                           sizeof(drm_intel_reloc_target));
        if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
                bo_gem->has_error = true;

                free (bo_gem->relocs);
                bo_gem->relocs = NULL;

                free (bo_gem->reloc_target_info);
                bo_gem->reloc_target_info = NULL;

                return 1;
        }

        return 0;
}
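
/*
 * The bo->size / 4 clamp above reflects that each relocation patches a
 * 4-byte dword in the batch, so a buffer of N bytes can never contain
 * more than N/4 relocation sites.
 */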

static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_busy busy;
        int ret;

        if (bo_gem->reusable && bo_gem->idle)
                return false;

        memclear(busy);
        busy.handle = bo_gem->gem_handle;

        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
        if (ret == 0) {
                bo_gem->idle = !busy.busy;
                return busy.busy;
        } else {
                return false;
        }
}

static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
                                  drm_intel_bo_gem *bo_gem, int state)
{
        struct drm_i915_gem_madvise madv;

        memclear(madv);
        madv.handle = bo_gem->gem_handle;
        madv.madv = state;
        madv.retained = 1;
//      drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

        return madv.retained;
}

static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
        return drm_intel_gem_bo_madvise_internal
                ((drm_intel_bufmgr_gem *) bo->bufmgr,
                 (drm_intel_bo_gem *) bo,
                 madv);
}

/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
                                    struct drm_intel_gem_bo_bucket *bucket)
{
        while (!DRMLISTEMPTY(&bucket->head)) {
                drm_intel_bo_gem *bo_gem;

                bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                      bucket->head.next, head);
                if (drm_intel_gem_bo_madvise_internal
                    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
                        break;

                DRMLISTDEL(&bo_gem->head);
                drm_intel_gem_bo_free(&bo_gem->bo);
        }
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
                                const char *name,
                                unsigned long size,
                                unsigned long flags,
                                uint32_t tiling_mode,
                                unsigned long stride,
                                unsigned int alignment)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        drm_intel_bo_gem *bo_gem;
        unsigned int page_size = 4096;
        int ret;
        struct drm_intel_gem_bo_bucket *bucket;
        bool alloc_from_cache;
        unsigned long bo_size;
        bool for_render = false;

        if (flags & BO_ALLOC_FOR_RENDER)
                for_render = true;

        /* Round the allocated size up to a power of two number of pages. */
        bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

        /* If we don't have caching at this size, don't actually round the
         * allocation up.
         */
        if (bucket == NULL) {
                bo_size = size;
                if (bo_size < page_size)
                        bo_size = page_size;
        } else {
                bo_size = bucket->size;
        }

//      pthread_mutex_lock(&bufmgr_gem->lock);
        /* Get a buffer out of the cache if available */
retry:
        alloc_from_cache = false;
        if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
                if (for_render) {
                        /* Allocate new render-target BOs from the tail (MRU)
                         * of the list, as it will likely be hot in the GPU
                         * cache and in the aperture for us.
                         */
                        bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                              bucket->head.prev, head);
                        DRMLISTDEL(&bo_gem->head);
                        alloc_from_cache = true;
                        bo_gem->bo.align = alignment;
                } else {
                        assert(alignment == 0);
                        /* For non-render-target BOs (where we're probably
                         * going to map it first thing in order to fill it
                         * with data), check if the last BO in the cache is
                         * unbusy, and only reuse in that case. Otherwise,
                         * allocating a new buffer is probably faster than
                         * waiting for the GPU to finish.
                         */
                        bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                              bucket->head.next, head);
                        if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
                                alloc_from_cache = true;
                                DRMLISTDEL(&bo_gem->head);
                        }
                }

                if (alloc_from_cache) {
                        if (!drm_intel_gem_bo_madvise_internal
                            (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
                                drm_intel_gem_bo_free(&bo_gem->bo);
                                drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
                                                                    bucket);
                                goto retry;
                        }

                        if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
                                                                 tiling_mode,
                                                                 stride)) {
                                drm_intel_gem_bo_free(&bo_gem->bo);
                                goto retry;
                        }
                }
        }
//      pthread_mutex_unlock(&bufmgr_gem->lock);

        if (!alloc_from_cache) {
                struct drm_i915_gem_create create;

                bo_gem = calloc(1, sizeof(*bo_gem));
                if (!bo_gem)
                        return NULL;

                bo_gem->bo.size = bo_size;

                memclear(create);
                create.size = bo_size;

                ret = drmIoctl(bufmgr_gem->fd,
                               DRM_IOCTL_I915_GEM_CREATE,
                               &create);
                bo_gem->gem_handle = create.handle;
                bo_gem->bo.handle = bo_gem->gem_handle;
                if (ret != 0) {
                        free(bo_gem);
                        return NULL;
                }
                bo_gem->bo.bufmgr = bufmgr;
                bo_gem->bo.align = alignment;

                bo_gem->tiling_mode = I915_TILING_NONE;
                bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
                bo_gem->stride = 0;

                /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
                   list (vma_list), so better set the list head here */
                DRMINITLISTHEAD(&bo_gem->name_list);
                DRMINITLISTHEAD(&bo_gem->vma_list);
                if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
                                                         tiling_mode,
                                                         stride)) {
                        drm_intel_gem_bo_free(&bo_gem->bo);
                        return NULL;
                }
        }

        bo_gem->name = name;
        atomic_set(&bo_gem->refcount, 1);
        bo_gem->validate_index = -1;
        bo_gem->reloc_tree_fences = 0;
        bo_gem->used_as_reloc_target = false;
        bo_gem->has_error = false;
        bo_gem->reusable = true;
        bo_gem->use_48b_address_range = false;

        drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);

        DBG("bo_create: buf %d (%s) %ldb\n",
            bo_gem->gem_handle, bo_gem->name, size);

        return &bo_gem->bo;
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
                                  const char *name,
                                  unsigned long size,
                                  unsigned int alignment)
{
        return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
                                               BO_ALLOC_FOR_RENDER,
                                               I915_TILING_NONE, 0,
                                               alignment);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
                       const char *name,
                       unsigned long size,
                       unsigned int alignment)
{
        return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
                                               I915_TILING_NONE, 0, 0);
}

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
                             int x, int y, int cpp, uint32_t *tiling_mode,
                             unsigned long *pitch, unsigned long flags)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
        unsigned long size, stride;
        uint32_t tiling;

        do {
                unsigned long aligned_y, height_alignment;

                tiling = *tiling_mode;

                /* If we're tiled, our allocations are in 8 or 32-row blocks,
                 * so failure to align our height means that we won't allocate
                 * enough pages.
                 *
                 * If we're untiled, we still have to align to 2 rows high
                 * because the data port accesses 2x2 blocks even if the
                 * bottom row isn't to be rendered, so failure to align means
                 * we could walk off the end of the GTT and fault.  This is
                 * documented on 965, and may be the case on older chipsets
                 * too so we try to be careful.
                 */
                aligned_y = y;
                height_alignment = 2;

                if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
                        height_alignment = 16;
                else if (tiling == I915_TILING_X
                         || (IS_915(bufmgr_gem->pci_device)
                             && tiling == I915_TILING_Y))
                        height_alignment = 8;
                else if (tiling == I915_TILING_Y)
                        height_alignment = 32;
                aligned_y = ALIGN(y, height_alignment);

                stride = x * cpp;
                stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
                size = stride * aligned_y;
                size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
        } while (*tiling_mode != tiling);
        *pitch = stride;

        if (tiling == I915_TILING_NONE)
                stride = 0;

        return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
                                               tiling, stride, 0);
}

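/*
 * Worked example: a 1024x768, 4-byte-per-pixel X-tiled surface on gen4+
 * gives stride = 4096 (already a multiple of the 512-byte tile width),
 * aligned_y = ALIGN(768, 8) = 768, and size = 4096 * 768 = 3 MiB, which
 * drm_intel_gem_bo_tile_size() leaves page-aligned as-is.
 */
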
#if 0
static drm_intel_bo *
drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
                               const char *name,
                               void *addr,
                               uint32_t tiling_mode,
                               uint32_t stride,
                               unsigned long size,
                               unsigned long flags)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        drm_intel_bo_gem *bo_gem;
        int ret;
        struct drm_i915_gem_userptr userptr;

        /* Tiling with userptr surfaces is not supported
         * on all hardware so refuse it for time being.
         */
        if (tiling_mode != I915_TILING_NONE)
                return NULL;

        bo_gem = calloc(1, sizeof(*bo_gem));
        if (!bo_gem)
                return NULL;

        bo_gem->bo.size = size;

        memclear(userptr);
        userptr.user_ptr = (__u64)((unsigned long)addr);
        userptr.user_size = size;
        userptr.flags = flags;

        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_USERPTR,
                       &userptr);
        if (ret != 0) {
                DBG("bo_create_userptr: "
                    "ioctl failed with user ptr %p size 0x%lx, "
                    "user flags 0x%lx\n", addr, size, flags);
                free(bo_gem);
                return NULL;
        }

        bo_gem->gem_handle = userptr.handle;
        bo_gem->bo.handle = bo_gem->gem_handle;
        bo_gem->bo.bufmgr = bufmgr;
        bo_gem->is_userptr = true;
        bo_gem->bo.virtual = addr;
        /* Save the address provided by user */
        bo_gem->user_virtual = addr;
        bo_gem->tiling_mode = I915_TILING_NONE;
        bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
        bo_gem->stride = 0;

        DRMINITLISTHEAD(&bo_gem->name_list);
        DRMINITLISTHEAD(&bo_gem->vma_list);

        bo_gem->name = name;
        atomic_set(&bo_gem->refcount, 1);
        bo_gem->validate_index = -1;
        bo_gem->reloc_tree_fences = 0;
        bo_gem->used_as_reloc_target = false;
        bo_gem->has_error = false;
        bo_gem->reusable = false;
        bo_gem->use_48b_address_range = false;

        drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

        DBG("bo_create_userptr: "
            "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
            addr, bo_gem->gem_handle, bo_gem->name,
            size, stride, tiling_mode);

        return &bo_gem->bo;
}

static bool
has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
{
        int ret;
        void *ptr;
        long pgsz;
        struct drm_i915_gem_userptr userptr;

        pgsz = sysconf(_SC_PAGESIZE);
        assert(pgsz > 0);

        ret = posix_memalign(&ptr, pgsz, pgsz);
        if (ret) {
                DBG("Failed to get a page (%ld) for userptr detection!\n",
                    pgsz);
                return false;
        }

        memclear(userptr);
        userptr.user_ptr = (__u64)(unsigned long)ptr;
        userptr.user_size = pgsz;

retry:
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
        if (ret) {
                if (errno == ENODEV && userptr.flags == 0) {
                        userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
                        goto retry;
                }
                free(ptr);
                return false;
        }

        /* We don't release the userptr bo here as we want to keep the
         * kernel mm tracking alive for our lifetime. The first time we
         * create a userptr object the kernel has to install a mmu_notifier
         * which is a heavyweight operation (e.g. it requires taking all
         * mm_locks and stop_machine()).
         */

        bufmgr_gem->userptr_active.ptr = ptr;
        bufmgr_gem->userptr_active.handle = userptr.handle;

        return true;
}

#endif

static drm_intel_bo *
check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
                       const char *name,
                       void *addr,
                       uint32_t tiling_mode,
                       uint32_t stride,
                       unsigned long size,
                       unsigned long flags)
{
        bufmgr->bo_alloc_userptr = NULL;

        return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
                                          tiling_mode, stride, size, flags);
}

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
                                  const char *name,
                                  unsigned int handle)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
        drm_intel_bo_gem *bo_gem;
        int ret;
        struct drm_gem_open open_arg;
        struct drm_i915_gem_get_tiling get_tiling;
        drmMMListHead *list;

        /* At the moment most applications only have a few named bo.
         * For instance, in a DRI client only the render buffers passed
         * between X and the client are named. And since X returns the
         * alternating names for the front/back buffer a linear search
         * provides a sufficiently fast match.
         */
        for (list = bufmgr_gem->named.next;
             list != &bufmgr_gem->named;
             list = list->next) {
                bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
                if (bo_gem->global_name == handle) {
                        drm_intel_gem_bo_reference(&bo_gem->bo);
                        return &bo_gem->bo;
                }
        }

        memclear(open_arg);
        open_arg.name = handle;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_GEM_OPEN,
                       &open_arg);
        if (ret != 0) {
                DBG("Couldn't reference %s handle 0x%08x: %s\n",
                    name, handle, strerror(errno));
                return NULL;
        }
        /* Now see if someone has used a prime handle to get this
         * object from the kernel before by looking through the list
         * again for a matching gem_handle
         */
        for (list = bufmgr_gem->named.next;
             list != &bufmgr_gem->named;
             list = list->next) {
                bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
                if (bo_gem->gem_handle == open_arg.handle) {
                        drm_intel_gem_bo_reference(&bo_gem->bo);
                        return &bo_gem->bo;
                }
        }

        bo_gem = calloc(1, sizeof(*bo_gem));
        if (!bo_gem)
                return NULL;

        bo_gem->bo.size = open_arg.size;
        bo_gem->bo.offset = 0;
        bo_gem->bo.offset64 = 0;
        bo_gem->bo.virtual = NULL;
        bo_gem->bo.bufmgr = bufmgr;
        bo_gem->name = name;
        atomic_set(&bo_gem->refcount, 1);
        bo_gem->validate_index = -1;
        bo_gem->gem_handle = open_arg.handle;
        bo_gem->bo.handle = open_arg.handle;
        bo_gem->global_name = handle;
        bo_gem->reusable = false;
        bo_gem->use_48b_address_range = false;

        memclear(get_tiling);
        get_tiling.handle = bo_gem->gem_handle;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_GET_TILING,
                       &get_tiling);
        if (ret != 0) {
                drm_intel_gem_bo_unreference(&bo_gem->bo);
                return NULL;
        }
        bo_gem->tiling_mode = get_tiling.tiling_mode;
        bo_gem->swizzle_mode = get_tiling.swizzle_mode;
        /* XXX stride is unknown */
        drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

        DRMINITLISTHEAD(&bo_gem->vma_list);
        DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
        DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

        return &bo_gem->bo;
}

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_gem_close close;
        int ret;

        DRMLISTDEL(&bo_gem->vma_list);
        if (bo_gem->mem_virtual) {
                VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
                bufmgr_gem->vma_count--;
        }
        if (bo_gem->gtt_virtual) {
                bufmgr_gem->vma_count--;
        }

        /* Close this object */
        memclear(close);
        close.handle = bo_gem->gem_handle;
        ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
        if (ret != 0) {
                DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
                    bo_gem->gem_handle, bo_gem->name, strerror(errno));
        }
        free(bo);
}

static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#if HAVE_VALGRIND
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        if (bo_gem->mem_virtual)
                VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);

        if (bo_gem->gtt_virtual)
                VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}

/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
        int i;

        if (bufmgr_gem->time == time)
                return;

        for (i = 0; i < bufmgr_gem->num_buckets; i++) {
                struct drm_intel_gem_bo_bucket *bucket =
                    &bufmgr_gem->cache_bucket[i];

                while (!DRMLISTEMPTY(&bucket->head)) {
                        drm_intel_bo_gem *bo_gem;

                        bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                              bucket->head.next, head);
                        if (time - bo_gem->free_time <= 1)
                                break;

                        DRMLISTDEL(&bo_gem->head);

                        drm_intel_gem_bo_free(&bo_gem->bo);
                }
        }

        bufmgr_gem->time = time;
}

static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
        int limit;

        DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
            bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);

        if (bufmgr_gem->vma_max < 0)
                return;

        /* We may need to evict a few entries in order to create new mmaps */
        limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
        if (limit < 0)
                limit = 0;

        while (bufmgr_gem->vma_count > limit) {
                drm_intel_bo_gem *bo_gem;

                bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
                                      bufmgr_gem->vma_cache.next,
                                      vma_list);
                assert(bo_gem->map_count == 0);
                DRMLISTDELINIT(&bo_gem->vma_list);

                if (bo_gem->mem_virtual) {
//                      munmap(bo_gem->mem_virtual, bo_gem->bo.size);
                        bo_gem->mem_virtual = NULL;
                        bufmgr_gem->vma_count--;
                }
                if (bo_gem->gtt_virtual) {
//                      munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
                        bo_gem->gtt_virtual = NULL;
                        bufmgr_gem->vma_count--;
                }
        }
}

static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
                                       drm_intel_bo_gem *bo_gem)
{
        bufmgr_gem->vma_open--;
        DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
        if (bo_gem->mem_virtual)
                bufmgr_gem->vma_count++;
        if (bo_gem->gtt_virtual)
                bufmgr_gem->vma_count++;
        drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
                                      drm_intel_bo_gem *bo_gem)
{
        bufmgr_gem->vma_open++;
        DRMLISTDEL(&bo_gem->vma_list);
        if (bo_gem->mem_virtual)
                bufmgr_gem->vma_count--;
        if (bo_gem->gtt_virtual)
                bufmgr_gem->vma_count--;
        drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}

static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_intel_gem_bo_bucket *bucket;
        int i;

        /* Unreference all the target buffers */
        for (i = 0; i < bo_gem->reloc_count; i++) {
                if (bo_gem->reloc_target_info[i].bo != bo) {
                        drm_intel_gem_bo_unreference_locked_timed(bo_gem->
                                                                  reloc_target_info[i].bo,
                                                                  time);
                }
        }
        for (i = 0; i < bo_gem->softpin_target_count; i++)
                drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
                                                          time);
        bo_gem->reloc_count = 0;
        bo_gem->used_as_reloc_target = false;
        bo_gem->softpin_target_count = 0;

        DBG("bo_unreference final: %d (%s)\n",
            bo_gem->gem_handle, bo_gem->name);

        /* release memory associated with this object */
        if (bo_gem->reloc_target_info) {
                free(bo_gem->reloc_target_info);
                bo_gem->reloc_target_info = NULL;
        }
        if (bo_gem->relocs) {
                free(bo_gem->relocs);
                bo_gem->relocs = NULL;
        }
        if (bo_gem->softpin_target) {
                free(bo_gem->softpin_target);
                bo_gem->softpin_target = NULL;
                bo_gem->softpin_target_size = 0;
        }

        /* Clear any left-over mappings */
        if (bo_gem->map_count) {
                DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
                bo_gem->map_count = 0;
                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                drm_intel_gem_bo_mark_mmaps_incoherent(bo);
        }

        DRMLISTDEL(&bo_gem->name_list);

        bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
        /* Put the buffer into our internal cache for reuse if we can. */
        if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
            drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
                                              I915_MADV_DONTNEED)) {
                bo_gem->free_time = time;

                bo_gem->name = NULL;
                bo_gem->validate_index = -1;

                DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
        } else {
                drm_intel_gem_bo_free(bo);
        }
}

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
                                                      time_t time)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        assert(atomic_read(&bo_gem->refcount) > 0);
        if (atomic_dec_and_test(&bo_gem->refcount))
                drm_intel_gem_bo_unreference_final(bo, time);
}

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

        assert(atomic_read(&bo_gem->refcount) > 0);

        if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
                drm_intel_bufmgr_gem *bufmgr_gem =
                    (drm_intel_bufmgr_gem *) bo->bufmgr;
                struct timespec time;

                clock_gettime(CLOCK_MONOTONIC, &time);

//              pthread_mutex_lock(&bufmgr_gem->lock);

                if (atomic_dec_and_test(&bo_gem->refcount)) {
                        drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
                        drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
                }

//              pthread_mutex_unlock(&bufmgr_gem->lock);
        }
}
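
/*
 * The above is a lockless fast path: when the refcount is still above 1,
 * atomic_add_unless() performs the decrement by itself and the function
 * returns immediately; only a potentially final unreference falls into
 * the (here stubbed-out) locked slow path, where atomic_dec_and_test()
 * re-checks the count before freeing the BO and pruning the cache.
 */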

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_set_domain set_domain;
        int ret;

        if (bo_gem->is_userptr) {
                /* Return the same user ptr */
                bo->virtual = bo_gem->user_virtual;
                return 0;
        }

//      pthread_mutex_lock(&bufmgr_gem->lock);

        if (bo_gem->map_count++ == 0)
                drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

        if (!bo_gem->mem_virtual) {
                struct drm_i915_gem_mmap mmap_arg;

                DBG("bo_map: %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                memclear(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;
                mmap_arg.size = bo->size;
                ret = drmIoctl(bufmgr_gem->fd,
                               DRM_IOCTL_I915_GEM_MMAP,
                               &mmap_arg);
                if (ret != 0) {
                        ret = -errno;
                        DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                            __FILE__, __LINE__, bo_gem->gem_handle,
                            bo_gem->name, strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
//                      pthread_mutex_unlock(&bufmgr_gem->lock);
                        return ret;
                }
                VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
                bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
        }
        DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->mem_virtual);
        bo->virtual = bo_gem->mem_virtual;

        memclear(set_domain);
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
        if (write_enable)
                set_domain.write_domain = I915_GEM_DOMAIN_CPU;
        else
                set_domain.write_domain = 0;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_SET_DOMAIN,
                       &set_domain);
        if (ret != 0) {
                DBG("%s:%d: Error setting to CPU domain %d: %s\n",
                    __FILE__, __LINE__, bo_gem->gem_handle,
                    strerror(errno));
        }

        if (write_enable)
                bo_gem->mapped_cpu_write = true;

        drm_intel_gem_bo_mark_mmaps_incoherent(bo);
        VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
//      pthread_mutex_unlock(&bufmgr_gem->lock);

        return 0;
}
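
/*
 * Typical caller pattern for the CPU mapping above (a sketch, error
 * handling elided); the second drm_intel_bo_map() argument is
 * write_enable:
 *
 *        drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 0);
 *        if (drm_intel_bo_map(bo, 1) == 0) {
 *                memset(bo->virtual, 0, bo->size);
 *                drm_intel_bo_unmap(bo);
 *        }
 *        drm_intel_bo_unreference(bo);
 */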

static int
map_gtt(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        int ret;

        if (bo_gem->is_userptr)
                return -EINVAL;

        if (bo_gem->map_count++ == 0)
                drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);

        /* Get a mapping of the buffer if we haven't before. */
        if (bo_gem->gtt_virtual == NULL) {
                struct drm_i915_gem_mmap_gtt mmap_arg;

                DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
                    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);

                memclear(mmap_arg);
                mmap_arg.handle = bo_gem->gem_handle;

                /* Get the fake offset back... */
                ret = drmIoctl(bufmgr_gem->fd,
                               DRM_IOCTL_I915_GEM_MMAP_GTT,
                               &mmap_arg);
                if (ret != 0) {
                        ret = -errno;
                        DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
                            __FILE__, __LINE__,
                            bo_gem->gem_handle, bo_gem->name,
                            strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                        return ret;
                }

                /* and mmap it */
                bo_gem->gtt_virtual = (void*)(__u32)mmap_arg.offset;
                if (bo_gem->gtt_virtual == 0) {
                        bo_gem->gtt_virtual = NULL;
                        ret = -errno;
                        DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
                            __FILE__, __LINE__,
                            bo_gem->gem_handle, bo_gem->name,
                            strerror(errno));
                        if (--bo_gem->map_count == 0)
                                drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
                        return ret;
                }
        }

        bo->virtual = bo_gem->gtt_virtual;

        DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->gtt_virtual);

        return 0;
}

int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
        drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
        struct drm_i915_gem_set_domain set_domain;
        int ret;

//      pthread_mutex_lock(&bufmgr_gem->lock);

        ret = map_gtt(bo);
        if (ret) {
//              pthread_mutex_unlock(&bufmgr_gem->lock);
                return ret;
        }

        /* Now move it to the GTT domain so that the GPU and CPU
         * caches are flushed and the GPU isn't actively using the
         * buffer.
         *
         * The pagefault handler does this domain change for us when
         * it has unbound the BO from the GTT, but it's up to us to
         * tell it when we're about to use things if we had done
         * rendering and it still happens to be bound to the GTT.
         */
        memclear(set_domain);
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_GTT;
        set_domain.write_domain = I915_GEM_DOMAIN_GTT;
        ret = drmIoctl(bufmgr_gem->fd,
                       DRM_IOCTL_I915_GEM_SET_DOMAIN,
                       &set_domain);
        if (ret != 0) {
                DBG("%s:%d: Error setting domain %d: %s\n",
                    __FILE__, __LINE__, bo_gem->gem_handle,
                    strerror(errno));
        }

        drm_intel_gem_bo_mark_mmaps_incoherent(bo);
        VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
//      pthread_mutex_unlock(&bufmgr_gem->lock);

        return 0;
}
||

/**
 * Performs a mapping of the buffer object like the normal GTT
 * mapping, but avoids waiting for the GPU to be done reading from or
 * rendering to the buffer.
 *
 * This is used in the implementation of GL_ARB_map_buffer_range: The
 * user asks to create a buffer, then does a mapping, fills some
 * space, runs a drawing command, then asks to map it again without
 * synchronizing because it guarantees that it won't write over the
 * data that the GPU is busy using (or, more specifically, that if it
 * does write over the data, it acknowledges that rendering is
 * undefined).
 */

int
drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
#ifdef HAVE_VALGRIND
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
#endif
	int ret;

	/* If the CPU cache isn't coherent with the GTT, then use a
	 * regular synchronized mapping.  The problem is that we don't
	 * track where the buffer was last used on the CPU side in
	 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
	 * we would potentially corrupt the buffer even when the user
	 * does reasonable things.
	 */
	if (!bufmgr_gem->has_llc)
		return drm_intel_gem_bo_map_gtt(bo);

//	pthread_mutex_lock(&bufmgr_gem->lock);
	ret = map_gtt(bo);
//	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}
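/*
 * Illustrative sketch (not part of this file) of the
 * GL_ARB_map_buffer_range-style pattern described above: remap without
 * synchronizing to append data into a range the caller guarantees the GPU
 * is not reading.  `already_used` is a hypothetical byte count.
 */
#if 0
static void example_unsync_append(drm_intel_bo *bo, const char *data,
				  int len, int already_used)
{
	if (drm_intel_gem_bo_map_unsynchronized(bo) == 0) {
		memcpy((char *)bo->virtual + already_used, data, len);
		drm_intel_gem_bo_unmap_gtt(bo);
	}
}
#endif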

static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret = 0;

	if (bo == NULL)
		return 0;

	if (bo_gem->is_userptr)
		return 0;

	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
//	pthread_mutex_lock(&bufmgr_gem->lock);

	if (bo_gem->map_count <= 0) {
		DBG("attempted to unmap an unmapped bo\n");
//		pthread_mutex_unlock(&bufmgr_gem->lock);
		/* Preserve the old behaviour of just treating this as a
		 * no-op rather than reporting the error.
		 */
		return 0;
	}

	if (bo_gem->mapped_cpu_write) {
		/* Cause a flush to happen if the buffer's pinned for
		 * scanout, so the results show up in a timely manner.
		 * Unlike GTT set domains, this only does work if the
		 * buffer should be scanout-related.
		 */

		bo_gem->mapped_cpu_write = false;
	}

	/* We need to unmap after every invocation as we cannot track
	 * an open vma for every bo as that will exhaust the system
	 * limits and cause later failures.
	 */
	if (--bo_gem->map_count == 0) {
		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
		bo->virtual = NULL;
	}
//	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

int
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
	return drm_intel_gem_bo_unmap(bo);
}

static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	memclear(pwrite);
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PWRITE,
		       &pwrite);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}

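/*
 * Usage sketch (illustrative): uploading data without mapping, via the
 * public drm_intel_bo_subdata() entry point that dispatches to the PWRITE
 * path above.
 */
#if 0
static int example_upload(drm_intel_bo *bo, const uint32_t *dwords, int n)
{
	/* Writes n dwords at offset 0 of the BO. */
	return drm_intel_bo_subdata(bo, 0, n * sizeof(*dwords), dwords);
}
#endif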
#if 0
static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
	int ret;

	memclear(get_pipe_from_crtc_id);
	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		       &get_pipe_from_crtc_id);
	if (ret != 0) {
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up.
		 */
		return -1;
	}

	return get_pipe_from_crtc_id.pipe;
}
#endif

static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;
	int ret;

	if (bo_gem->is_userptr)
		return -EINVAL;

	memclear(pread);
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.size = size;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PREAD,
		       &pread);
	if (ret != 0) {
		ret = -errno;
		DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
		    (int)size, strerror(errno));
	}

	return ret;
}

/** Waits for all GPU rendering with the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
	drm_intel_gem_bo_start_gtt_access(bo, 1);
}

/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time.  Otherwise some negative
 * return value describes the error.  Of particular interest is -ETIME when
 * the wait has failed to yield the desired result.
 *
 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter
 * allows the operation to give up after a certain amount of time.  Another
 * subtle difference is that the internal locking semantics differ (this
 * variant does not hold the lock for the duration of the wait).  This makes
 * the wait subject to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call.  The wait will
 * not guarantee that the buffer is not re-issued via another thread, or a
 * flinked handle.  Userspace must make sure this race does not occur if
 * such precision is important.
 *
 * Note that some kernels have broken the infinite-wait-for-negative-values
 * promise; upgrade to the latest stable kernels if this is the case.
 */
int
drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_wait wait;
	int ret;

	if (!bufmgr_gem->has_wait_timeout) {
		DBG("%s:%d: Timed wait is not supported. Falling back to "
		    "infinite wait\n", __FILE__, __LINE__);
		if (timeout_ns) {
			drm_intel_gem_bo_wait_rendering(bo);
			return 0;
		} else {
			return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
		}
	}

	memclear(wait);
	wait.bo_handle = bo_gem->gem_handle;
	wait.timeout_ns = timeout_ns;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	if (ret == -1)
		return -errno;

	return ret;
}

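/*
 * Usage sketch (illustrative): bounded wait before recycling a buffer,
 * giving the last batch that references it up to one second to finish.
 */
#if 0
static bool example_wait_one_second(drm_intel_bo *bo)
{
	/* 10^9 ns; a negative value would request an infinite wait. */
	return drm_intel_gem_bo_wait(bo, 1000000000ll) == 0;
}
#endif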
/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;
	int ret;

	memclear(set_domain);
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_SET_DOMAIN,
		       &set_domain);
	if (ret != 0) {
		DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
		    __FILE__, __LINE__, bo_gem->gem_handle,
		    set_domain.read_domains, set_domain.write_domain,
		    strerror(errno));
	}
}
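/*
 * Illustrative 2D-driver-style use of the helper above (hypothetical
 * caller; pinning and fence management are omitted): flush GPU work and
 * move a scanout pixmap to the GTT domain before CPU software rendering.
 */
#if 0
static void example_sw_fallback(drm_intel_bo *pixmap_bo)
{
	drm_intel_gem_bo_start_gtt_access(pixmap_bo, 1);	/* write access */
}
#endif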

static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int i;

	free(bufmgr_gem->exec2_objects);
	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

//	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);
		}
	}

	free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
		 drm_intel_bo *target_bo, uint32_t target_offset,
		 uint32_t read_domains, uint32_t write_domain,
		 bool need_fence)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
	bool fenced_command;

	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	/* We never use HW fences for rendering on 965+ */
	if (bufmgr_gem->gen >= 4)
		need_fence = false;

	fenced_command = need_fence;
	if (target_bo_gem->tiling_mode == I915_TILING_NONE)
		need_fence = false;

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
		return -ENOMEM;

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	/* Check args */
	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* An object needing a fence is a tiled buffer, so it won't have
	 * relocs to other buffers.
	 */
	if (need_fence) {
		assert(target_bo_gem->reloc_count == 0);
		target_bo_gem->reloc_tree_fences = 1;
	}

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	if (target_bo_gem != bo_gem) {
		target_bo_gem->used_as_reloc_target = true;
		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
		bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
	}

	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
	if (target_bo != bo)
		drm_intel_gem_bo_reference(target_bo);
	if (fenced_command)
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
			DRM_INTEL_RELOC_FENCE;
	else
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
	bo_gem->reloc_count++;

	return 0;
}

static void
drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	bo_gem->use_48b_address_range = enable;
}

static int
drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
	if (bo_gem->has_error)
		return -ENOMEM;

	if (target_bo_gem->has_error) {
		bo_gem->has_error = true;
		return -ENOMEM;
	}

	if (!target_bo_gem->is_softpin)
		return -EINVAL;
	if (target_bo_gem == bo_gem)
		return -EINVAL;

	if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
		int new_size = bo_gem->softpin_target_size * 2;
		if (new_size == 0)
			new_size = bufmgr_gem->max_relocs;

		bo_gem->softpin_target = realloc(bo_gem->softpin_target,
						 new_size * sizeof(drm_intel_bo *));
		if (!bo_gem->softpin_target)
			return -ENOMEM;

		bo_gem->softpin_target_size = new_size;
	}
	bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
	drm_intel_gem_bo_reference(target_bo);
	bo_gem->softpin_target_count++;

	return 0;
}

static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;

	if (target_bo_gem->is_softpin)
		return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
	else
		return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
					read_domains, write_domain,
					!bufmgr_gem->fenced_relocs);
}

static int
drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
				  drm_intel_bo *target_bo,
				  uint32_t target_offset,
				  uint32_t read_domains, uint32_t write_domain)
{
	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain, true);
}

int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reloc_count;
}

/**
 * Removes existing relocation entries in the BO after "start".
 *
 * This allows a user to avoid a two-step process for state setup with
 * counting up all the buffer objects and doing a
 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
 * relocations for the state setup.  Instead, save the state of the
 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
 * state, and then check if it still fits in the aperture.
 *
 * Any further drm_intel_bufmgr_check_aperture_space() queries
 * involving this buffer in the tree are undefined after this call.
 *
 * This also removes all softpinned targets being referenced by the BO.
 */
void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	struct timespec time;

	clock_gettime(CLOCK_MONOTONIC, &time);

	assert(bo_gem->reloc_count >= start);

	/* Unreference the cleared target buffers */
	for (i = start; i < bo_gem->reloc_count; i++) {
		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
		if (&target_bo_gem->bo != bo) {
			bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
			drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
								  time.tv_sec);
		}
	}
	bo_gem->reloc_count = start;

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
		drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
	}
	bo_gem->softpin_target_count = 0;
}
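
/*
 * Illustrative sketch of the pattern described above (hypothetical caller):
 * snapshot the reloc count, emit state speculatively, then roll back when
 * the batch would no longer fit in the aperture.  emit_state() is a made-up
 * driver hook that adds relocations to batch_bo.
 */
#if 0
static void example_speculative_state(drm_intel_bo *batch_bo,
				      drm_intel_bo **all_bos, int count)
{
	int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);

	emit_state(batch_bo);
	if (drm_intel_bufmgr_check_aperture_space(all_bos, count) != 0)
		drm_intel_gem_bo_clear_relocs(batch_bo, saved);
}
#endif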

/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo_gem->relocs == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;

		if (target_bo == bo)
			continue;

		drm_intel_gem_bo_mark_mmaps_incoherent(bo);

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);
	}
}

static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
	int i;

	if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
		return;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
		int need_fence;

		if (target_bo == bo)
			continue;

		drm_intel_gem_bo_mark_mmaps_incoherent(bo);

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc2(target_bo);

		need_fence = (bo_gem->reloc_target_info[i].flags &
			      DRM_INTEL_RELOC_FENCE);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer2(target_bo, need_fence);
	}

	for (i = 0; i < bo_gem->softpin_target_count; i++) {
		drm_intel_bo *target_bo = bo_gem->softpin_target[i];

		if (target_bo == bo)
			continue;

		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
		drm_intel_gem_bo_process_reloc2(target_bo);
		drm_intel_add_validate_buffer2(target_bo, false);
	}
}

static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
			    bo_gem->gem_handle, bo_gem->name,
			    upper_32_bits(bo->offset64),
			    lower_32_bits(bo->offset64),
			    upper_32_bits(bufmgr_gem->exec_objects[i].offset),
			    lower_32_bits(bufmgr_gem->exec_objects[i].offset));
			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
			bo->offset = bufmgr_gem->exec_objects[i].offset;
		}
	}
}

static void
drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
{
	int i;

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
			/* If we're seeing a softpinned object here it means
			 * that the kernel has relocated our object, which
			 * indicates a programming error.
			 */
			assert(!bo_gem->is_softpin);
			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
			    bo_gem->gem_handle, bo_gem->name,
			    upper_32_bits(bo->offset64),
			    lower_32_bits(bo->offset64),
			    upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
			    lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
			bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
			bo->offset = bufmgr_gem->exec2_objects[i].offset;
		}
	}
}

void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
			      int x1, int y1, int width, int height,
			      enum aub_dump_bmp_format format,
			      int pitch, int offset)
{
}

static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	struct drm_i915_gem_execbuffer execbuf;
	int ret, i;

	if (to_bo_gem(bo)->has_error)
		return -ENOMEM;

	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	memclear(execbuf);
	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (errno == ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int)bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);

		bo_gem->idle = false;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;

	return ret;
}

static int
do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
	 unsigned int flags)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	struct drm_i915_gem_execbuffer2 execbuf;
	int ret = 0;
	int i;

	if (to_bo_gem(bo)->has_error)
		return -ENOMEM;

	switch (flags & 0x7) {
	default:
		return -EINVAL;
	case I915_EXEC_BLT:
		if (!bufmgr_gem->has_blt)
			return -EINVAL;
		break;
	case I915_EXEC_BSD:
		if (!bufmgr_gem->has_bsd)
			return -EINVAL;
		break;
	case I915_EXEC_VEBOX:
		if (!bufmgr_gem->has_vebox)
			return -EINVAL;
		break;
	case I915_EXEC_RENDER:
	case I915_EXEC_DEFAULT:
		break;
	}

//	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc2(bo);

	/* Add the batch buffer to the validation list.  There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer2(bo, 0);

	memclear(execbuf);
	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t)cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.DR1 = 0;
	execbuf.DR4 = DR4;
	execbuf.flags = flags;
	if (ctx == NULL)
		i915_execbuffer2_set_context_id(execbuf, 0);
	else
		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
	execbuf.rsvd2 = 0;

	if (bufmgr_gem->no_exec)
		goto skip_execution;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_EXECBUFFER2,
		       &execbuf);
	if (ret != 0) {
		ret = -errno;
		if (ret == -ENOSPC) {
			DBG("Execbuffer fails to pin. "
			    "Estimate: %u. Actual: %u. Available: %u\n",
			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							       bufmgr_gem->exec_count),
			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							      bufmgr_gem->exec_count),
			    (unsigned int) bufmgr_gem->gtt_size);
		}
	}
	drm_intel_update_buffer_offsets2(bufmgr_gem);

skip_execution:
	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);

		bo_gem->idle = false;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;
	}
	bufmgr_gem->exec_count = 0;
//	pthread_mutex_unlock(&bufmgr_gem->lock);

	return ret;
}

static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects,
		       int DR4)
{
	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
			I915_EXEC_RENDER);
}

static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
			   drm_clip_rect_t *cliprects, int num_cliprects,
			   int DR4, unsigned int flags)
{
	return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
			flags);
}

int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
			      int used, unsigned int flags)
{
	return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
}

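/*
 * Usage sketch (illustrative): submitting `used` bytes of batch on the
 * render ring within a hardware context created by
 * drm_intel_gem_context_create().
 */
#if 0
static int example_submit(drm_intel_bo *batch_bo, drm_intel_context *ctx,
			  int used)
{
	return drm_intel_gem_bo_context_exec(batch_bo, ctx, used,
					     I915_EXEC_RENDER);
}
#endif
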
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;
	int ret;

	memclear(pin);
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_PIN,
		       &pin);
	if (ret != 0)
		return -errno;

	bo->offset64 = pin.offset;
	bo->offset = pin.offset;
	return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;
	int ret;

	memclear(unpin);
	unpin.handle = bo_gem->gem_handle;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
	if (ret != 0)
		return -errno;

	return 0;
}

static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,
				     uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (bo_gem->global_name == 0 &&
	    tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)
		return 0;

	memset(&set_tiling, 0, sizeof(set_tiling));
//	do {
		/* set_tiling is slightly broken and overwrites the
		 * input on the error path, so we have to open code
		 * drmIoctl.
		 */
		set_tiling.handle = bo_gem->gem_handle;
		set_tiling.tiling_mode = tiling_mode;
		set_tiling.stride = stride;

		ret = drmIoctl(bufmgr_gem->fd,
			       DRM_IOCTL_I915_GEM_SET_TILING,
			       &set_tiling);
//	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
	if (ret == -1)
		return -errno;

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = set_tiling.stride;
	return 0;
}

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t stride)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	/* Tiling with userptr surfaces is not supported
	 * on all hardware, so refuse it for the time being.
	 */
	if (bo_gem->is_userptr)
		return -EINVAL;

	/* Linear buffers have no stride.  By ensuring that we only ever use
	 * stride 0 with linear buffers, we simplify our code.
	 */
	if (*tiling_mode == I915_TILING_NONE)
		stride = 0;

	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
	if (ret == 0)
		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

	*tiling_mode = bo_gem->tiling_mode;
	return ret;
}

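/*
 * Usage sketch (illustrative): requesting X tiling through the public
 * drm_intel_bo_set_tiling() entry point.  The mode is read back because
 * the kernel may refuse or downgrade the request.
 */
#if 0
static int example_tile_x(drm_intel_bo *bo, uint32_t pitch)
{
	uint32_t tiling = I915_TILING_X;
	int ret = drm_intel_bo_set_tiling(bo, &tiling, pitch);

	return (ret == 0 && tiling == I915_TILING_X) ? 0 : -1;
}
#endif
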
static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;
	return 0;
}

static int
drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->is_softpin = true;
	bo->offset64 = offset;
	bo->offset = offset;
	return 0;
}

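/*
 * Usage sketch (illustrative): pinning a BO at a caller-chosen GPU virtual
 * address so later batches can reference it without relocations.  Assumes
 * the public wrappers drm_intel_bo_set_softpin_offset() and
 * drm_intel_bo_use_48b_address_range() are exported as in upstream libdrm;
 * the address is made up, and real callers must manage their own VMA layout.
 */
#if 0
static void example_softpin(drm_intel_bo *bo)
{
	drm_intel_bo_set_softpin_offset(bo, 0x100000000ull);
	drm_intel_bo_use_48b_address_range(bo, 1);
}
#endif
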
#if 0
drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	int ret;
	uint32_t handle;
	drm_intel_bo_gem *bo_gem;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
	if (ret) {
		DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
		pthread_mutex_unlock(&bufmgr_gem->lock);
		return NULL;
	}

	/*
	 * See if the kernel has already returned this buffer to us.  Just as
	 * for named buffers, we must not create two bo's pointing at the same
	 * kernel object.
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->gem_handle == handle) {
			drm_intel_gem_bo_reference(&bo_gem->bo);
			return &bo_gem->bo;
		}
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem) {
		return NULL;
	}
	/* Determine size of bo.  The fd-to-handle ioctl really should
	 * return the size, but it doesn't.  If we have kernel 3.12 or
	 * later, we can lseek on the prime fd to get the size.  Older
	 * kernels will just fail, in which case we fall back to the
	 * provided (estimated or guessed) size. */
	ret = lseek(prime_fd, 0, SEEK_END);
	if (ret != -1)
		bo_gem->bo.size = ret;
	else
		bo_gem->bo.size = size;

	bo_gem->bo.handle = handle;
	bo_gem->bo.bufmgr = bufmgr;

	bo_gem->gem_handle = handle;

	atomic_set(&bo_gem->refcount, 1);

	bo_gem->name = "prime";
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = false;
	bo_gem->has_error = false;
	bo_gem->reusable = false;
	bo_gem->use_48b_address_range = false;

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		DBG("create_from_prime: failed to get tiling: %s\n", strerror(errno));
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

	return &bo_gem->bo;
}

int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (DRMLISTEMPTY(&bo_gem->name_list))
		DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);

	if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
			       DRM_CLOEXEC, prime_fd) != 0)
		return -errno;

	bo_gem->reusable = false;

	return 0;
}
#endif

static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret;

	if (!bo_gem->global_name) {
		struct drm_gem_flink flink;

		memclear(flink);
		flink.handle = bo_gem->gem_handle;

		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
		if (ret != 0)
			return -errno;

		bo_gem->global_name = flink.name;
		bo_gem->reusable = false;

		if (DRMLISTEMPTY(&bo_gem->name_list))
			DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	}

	*name = bo_gem->global_name;
	return 0;
}
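/*
 * Usage sketch (illustrative): exporting a global (flink) name so another
 * process can open the same BO, e.g. with drm_intel_bo_gem_create_from_name().
 */
#if 0
static uint32_t example_share(drm_intel_bo *bo)
{
	uint32_t name = 0;

	drm_intel_bo_flink(bo, &name);	/* returns 0 on success */
	return name;
}
#endif
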
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = true;
}

/**
 * Enable use of the fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation.  If this option is not enabled, all relocs will have a fence
 * register allocated.
 */
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = true;
}

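/*
 * Illustrative init sequence (hypothetical caller): both knobs above are
 * typically flipped right after creating the buffer manager.
 */
#if 0
static drm_intel_bufmgr *example_init(int fd)
{
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);

	if (bufmgr) {
		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
		drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
	}
	return bufmgr;
}
#endif
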
/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;
	int total = 0;

	if (bo == NULL || bo_gem->included_in_check_aperture)
		return 0;

	total += bo->size;
	bo_gem->included_in_check_aperture = true;

	for (i = 0; i < bo_gem->reloc_count; i++)
		total +=
		    drm_intel_gem_bo_get_aperture_space(bo_gem->
							reloc_target_info[i].bo);

	return total;
}

/**
 * Count the number of buffers in this list that need a fence reg.
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		if (bo_gem == NULL)
			continue;

		total += bo_gem->reloc_tree_fences;
	}
	return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int i;

	if (bo == NULL || !bo_gem->included_in_check_aperture)
		return;

	bo_gem->included_in_check_aperture = false;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);
}

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers.  This may double-count some buffers.
 */
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
		if (bo_gem != NULL)
			total += bo_gem->reloc_tree_size;
	}
	return total;
}

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
	int i;
	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet).  We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers.  Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
		if (i == 0) {
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;
		}
	}

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
	return total;
}

/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance.  By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
	int total_fences;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)
			return -ENOSPC;
	}

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    "%dkb vs %dkb\n",
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
		return -ENOSPC;
	} else {
		DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
		    (int)bufmgr_gem->gtt_size / 1024);
		return 0;
	}
}

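/*
 * Usage sketch (illustrative): flush the batch early when the BOs it
 * references would overflow the 3/4-of-aperture threshold checked above.
 * flush_batch() is a made-up driver hook.
 */
#if 0
static void example_maybe_flush(drm_intel_bo **bos, int count)
{
	if (drm_intel_bufmgr_check_aperture_space(bos, count) != 0)
		flush_batch();
}
#endif
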
/*
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = false;
	return 0;
}

static int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;
}

2900 | |||
2901 | static int |
||
2902 | _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) |
||
2903 | { |
||
2904 | drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
||
2905 | int i; |
||
2906 | |||
2907 | for (i = 0; i < bo_gem->reloc_count; i++) { |
||
2908 | if (bo_gem->reloc_target_info[i].bo == target_bo) |
||
2909 | return 1; |
||
2910 | if (bo == bo_gem->reloc_target_info[i].bo) |
||
2911 | continue; |
||
2912 | if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo, |
||
2913 | target_bo)) |
||
2914 | return 1; |
||
2915 | } |
||
2916 | |||
6110 | serge | 2917 | for (i = 0; i< bo_gem->softpin_target_count; i++) { |
2918 | if (bo_gem->softpin_target[i] == target_bo) |
||
2919 | return 1; |
||
2920 | if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo)) |
||
2921 | return 1; |
||
2922 | } |
||
2923 | |||
4363 | Serge | 2924 | return 0; |
2925 | } |
||
2926 | |||
2927 | /** Return true if target_bo is referenced by bo's relocation tree. */ |
||
2928 | static int |
||
2929 | drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) |
||
2930 | { |
||
2931 | drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; |
||
2932 | |||
2933 | if (bo == NULL || target_bo == NULL) |
||
2934 | return 0; |
||
2935 | if (target_bo_gem->used_as_reloc_target) |
||
2936 | return _drm_intel_gem_bo_references(bo, target_bo); |
||
2937 | return 0; |
||
2938 | } |
||
2939 | |||
2940 | static void |
||
2941 | add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size) |
||
2942 | { |
||
2943 | unsigned int i = bufmgr_gem->num_buckets; |
||
2944 | |||
2945 | assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket)); |
||
2946 | |||
2947 | DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head); |
||
2948 | bufmgr_gem->cache_bucket[i].size = size; |
||
2949 | bufmgr_gem->num_buckets++; |
||
2950 | } |
||
2951 | |||
2952 | static void |
||
2953 | init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem) |
||
2954 | { |
||
2955 | unsigned long size, cache_max_size = 64 * 1024 * 1024; |
||
2956 | |||
2957 | /* OK, so power of two buckets was too wasteful of memory. |
||
2958 | * Give 3 other sizes between each power of two, to hopefully |
||
2959 | * cover things accurately enough. (The alternative is |
||
2960 | * probably to just go for exact matching of sizes, and assume |
||
2961 | * that for things like composited window resize the tiled |
||
2962 | * width/height alignment and rounding of sizes to pages will |
||
2963 | * get us useful cache hit rates anyway) |
||
2964 | */ |
||
2965 | add_bucket(bufmgr_gem, 4096); |
||
2966 | add_bucket(bufmgr_gem, 4096 * 2); |
||
2967 | add_bucket(bufmgr_gem, 4096 * 3); |
||
2968 | |||
2969 | /* Initialize the linked lists for BO reuse cache. */ |
||
2970 | for (size = 4 * 4096; size <= cache_max_size; size *= 2) { |
||
2971 | add_bucket(bufmgr_gem, size); |
||
2972 | |||
2973 | add_bucket(bufmgr_gem, size + size * 1 / 4); |
||
2974 | add_bucket(bufmgr_gem, size + size * 2 / 4); |
||
2975 | add_bucket(bufmgr_gem, size + size * 3 / 4); |
||
2976 | } |
||
2977 | } |
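
/*
 * Worked example of the bucket ladder built above: 4KB, 8KB, 12KB, then
 * 16KB, 20KB, 24KB, 28KB, 32KB, 40KB, 48KB, 56KB, 64KB, ... up to the
 * 64MB cap -- each power of two plus three evenly spaced sizes before the
 * next one, so cached allocations are rounded up by a bounded fraction
 * rather than to a full power of two.
 */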

void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	bufmgr_gem->vma_max = limit;

	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
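
/*
 * Illustrative sketch (not compiled in): a 32-bit client that worries
 * about address-space exhaustion can cap how many CPU/GTT mappings the
 * VMA cache keeps alive; the limit of 512 is a hypothetical value.
 */
#if 0
	drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 512);
#endif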

/**
 * Get the PCI ID for the device.  Upstream libdrm lets this be overridden
 * with the INTEL_DEVID_OVERRIDE environment variable, but that path is not
 * wired up in this port, so the kernel-reported ID is always used.
 */
static int
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int devid = 0;
	int ret;
	drm_i915_getparam_t gp;

	memclear(gp);
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret) {
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
	}
	return devid;
}

int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	return bufmgr_gem->pci_device;
}

/**
 * Sets the AUB filename.
 *
 * This had to be called before drm_intel_bufmgr_gem_set_aub_dump() to have
 * any effect.  With AUB dumping deprecated (see below), it is now a no-op
 * kept for ABI compatibility.
 */
void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
				      const char *filename)
{
}

/**
 * Sets up AUB dumping.
 *
 * This was a trace file format that could be used with the simulator:
 * packets were emitted in a format somewhat like GPU command packets, and
 * you could set up a GTT, upload your objects into the referenced space,
 * then send off batchbuffers and get BMPs out the other end.  The
 * implementation has been removed from libdrm; this entry point now only
 * prints a pointer to the intel_aubdump tool that replaced it.
 */
void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
	fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
		"Use intel_aubdump from intel-gpu-tools instead.  Install intel-gpu-tools,\n"
		"then run (for example)\n\n"
		"\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
		"See the intel_aubdump man page for more details.\n");
}

drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	struct drm_i915_gem_context_create create;
	drm_intel_context *context = NULL;
	int ret;

	context = calloc(1, sizeof(*context));
	if (!context)
		return NULL;

	memclear(create);
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
	if (ret != 0) {
		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
		    strerror(errno));
		free(context);
		return NULL;
	}

	context->ctx_id = create.ctx_id;
	context->bufmgr = bufmgr;

	return context;
}

void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_context_destroy destroy;
	int ret;

	if (ctx == NULL)
		return;

	memclear(destroy);

	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
	destroy.ctx_id = ctx->ctx_id;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
		       &destroy);
	if (ret != 0)
		fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
			strerror(errno));

	free(ctx);
}
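
/*
 * Illustrative sketch (not compiled in): the usual lifecycle of a hardware
 * context, paired with drm_intel_gem_bo_context_exec() so the batch runs
 * with the context's register state.  batch_bo and used_bytes are
 * hypothetical.
 */
#if 0
	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);

	if (ctx != NULL) {
		drm_intel_gem_bo_context_exec(batch_bo, ctx, used_bytes,
					      I915_EXEC_RENDER);
		drm_intel_gem_context_destroy(ctx);
	}
#endif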

int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
		   uint32_t offset,
		   uint64_t *result)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	struct drm_i915_reg_read reg_read;
	int ret;

	memclear(reg_read);
	reg_read.offset = offset;

	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

	*result = reg_read.val;
	return ret;
}
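
/*
 * Illustrative sketch (not compiled in): reading a 64-bit register through
 * the kernel.  0x2358 is the render ring TIMESTAMP register offset as used
 * by intel-gpu-tools; treat that value as an assumption here.
 */
#if 0
	uint64_t ts;

	if (drm_intel_reg_read(bufmgr, 0x2358, &ts) == 0)
		printf("GPU timestamp: %llu\n", (unsigned long long)ts);
#endif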

int
drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
{
	drm_i915_getparam_t gp;
	int ret;

	memclear(gp);
	gp.value = (int*)subslice_total;
	gp.param = I915_PARAM_SUBSLICE_TOTAL;
	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret)
		return -errno;

	return 0;
}

int
drm_intel_get_eu_total(int fd, unsigned int *eu_total)
{
	drm_i915_getparam_t gp;
	int ret;

	memclear(gp);
	gp.value = (int*)eu_total;
	gp.param = I915_PARAM_EU_TOTAL;
	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret)
		return -errno;

	return 0;
}
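
/*
 * Illustrative sketch (not compiled in): querying GPU topology on kernels
 * that know I915_PARAM_SUBSLICE_TOTAL / I915_PARAM_EU_TOTAL; both helpers
 * return 0 on success and -errno otherwise.
 */
#if 0
	unsigned int subslices, eus;

	if (drm_intel_get_subslice_total(fd, &subslices) == 0 &&
	    drm_intel_get_eu_total(fd, &eus) == 0)
		printf("%u subslices, %u EUs\n", subslices, eus);
#endif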

/**
 * Annotate the given bo for use in aub dumping.
 *
 * \param annotations is an array of drm_intel_aub_annotation objects
 * describing the type of data in various sections of the bo.  Each
 * element of the array specifies the type and subtype of a section of
 * the bo, and the past-the-end offset of that section.  The elements
 * of \c annotations must be sorted so that ending_offset is
 * increasing.
 *
 * \param count is the number of elements in the \c annotations array.
 * If \c count is zero, then \c annotations will not be dereferenced.
 *
 * Annotations are copied into a private data structure, so caller may
 * re-use the memory pointed to by \c annotations after the call
 * returns.
 *
 * Annotations are stored for the lifetime of the bo; to reset to the
 * default state (no annotations), call this function with a \c count
 * of zero.
 *
 * With AUB dumping removed, this is now a no-op kept for ABI
 * compatibility.
 */
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
					 drm_intel_aub_annotation *annotations,
					 unsigned count)
{
}

static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };

static drm_intel_bufmgr_gem *
drm_intel_bufmgr_gem_find(int fd)
{
	drm_intel_bufmgr_gem *bufmgr_gem;

	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
		if (bufmgr_gem->fd == fd) {
			atomic_inc(&bufmgr_gem->refcount);
			return bufmgr_gem;
		}
	}

	return NULL;
}

static void
drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

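	/* Fast path: atomic_add_unless() decrements and returns false while
	 * other references remain, so no locking is needed.  A potentially
	 * final unref (count already 1) falls through and re-checks with
	 * atomic_dec_and_test() before tearing the manager down; upstream
	 * holds bufmgr_list_mutex around this, which the single-threaded
	 * port leaves commented out. */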
	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
//		pthread_mutex_lock(&bufmgr_list_mutex);

		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
			DRMLISTDEL(&bufmgr_gem->managers);
			drm_intel_bufmgr_gem_destroy(bufmgr);
		}

//		pthread_mutex_unlock(&bufmgr_list_mutex);
	}
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate,
 * map, and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Size in bytes of the batchbuffers this driver will
 *	submit; used to size the relocation array.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int ret, tmp;
	bool exec2 = false;

//	pthread_mutex_lock(&bufmgr_list_mutex);

	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
	if (bufmgr_gem)
		goto exit;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)
		goto exit;

	bufmgr_gem->fd = fd;
	atomic_set(&bufmgr_gem->refcount, 1);

//	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
//		free(bufmgr_gem);
//		return NULL;
//	}

	memclear(aperture);
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_APERTURE,
		       &aperture);

	if (ret == 0)
		bufmgr_gem->gtt_size = aperture.aper_available_size;
	else {
		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
			strerror(errno));
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			"rendering.\n",
			(int)bufmgr_gem->gtt_size / 1024);
	}

	bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);

	if (IS_GEN2(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 4;
	else if (IS_GEN5(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 5;
	else if (IS_GEN6(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 6;
	else if (IS_GEN7(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 7;
	else if (IS_GEN8(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 8;
	else if (IS_GEN9(bufmgr_gem->pci_device))
		bufmgr_gem->gen = 9;
	else {
		free(bufmgr_gem);
		bufmgr_gem = NULL;
		goto exit;
	}

	if (IS_GEN3(bufmgr_gem->pci_device) &&
	    bufmgr_gem->gtt_size > 256*1024*1024) {
		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
		 * be used for tiled blits.  To simplify the accounting, just
		 * subtract the unmappable part (fixed to 256MB on all known
		 * gen3 devices) if the kernel advertises it. */
		bufmgr_gem->gtt_size -= 256*1024*1024;
	}

	memclear(gp);
	gp.value = &tmp;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (!ret)
		exec2 = true;

	gp.param = I915_PARAM_HAS_BSD;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_bsd = ret == 0;

	gp.param = I915_PARAM_HAS_BLT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_blt = ret == 0;

	gp.param = I915_PARAM_HAS_RELAXED_FENCING;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_relaxed_fencing = ret == 0;

	bufmgr_gem->bufmgr.bo_alloc_userptr = NULL;

	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_wait_timeout = ret == 0;

	gp.param = I915_PARAM_HAS_LLC;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret != 0) {
		/* Kernel does not support the HAS_LLC query; fall back to
		 * GPU generation detection and assume that GEN6/7 have LLC.
		 */
		bufmgr_gem->has_llc = IS_GEN6(bufmgr_gem->pci_device) ||
				      IS_GEN7(bufmgr_gem->pci_device);
	} else
		bufmgr_gem->has_llc = *gp.value;

	gp.param = I915_PARAM_HAS_VEBOX;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);

	gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
	if (ret == 0 && *gp.value > 0)
		bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;

	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (ret) {
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
				errno);
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
				*gp.value);
			bufmgr_gem->available_fences = 0;
		} else {
			/* XXX The kernel reports the total number of fences,
			 * including any that may be pinned.
			 *
			 * We presume that there will be at least one pinned
			 * fence for the scanout buffer, but there may be more
			 * than one scanout and the user may be manually
			 * pinning buffers. Let's move to execbuffer2 and
			 * thereby forget the insanity of using fences...
			 */
			bufmgr_gem->available_fences -= 2;
			if (bufmgr_gem->available_fences < 0)
				bufmgr_gem->available_fences = 0;
		}
	}

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 * buffer).
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
	if (exec2) {
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
	} else
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
//	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
//	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	DRMINITLISTHEAD(&bufmgr_gem->named);
	init_cache_buckets(bufmgr_gem);

	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
	bufmgr_gem->vma_max = -1; /* unlimited by default */

	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);

exit:
//	pthread_mutex_unlock(&bufmgr_list_mutex);

	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}
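
/*
 * Illustrative sketch (not compiled in): typical bring-up of the GEM
 * bufmgr.  The 16KB batch size is a hypothetical value; drivers pass the
 * size of the batchbuffers they actually submit so max_relocs is scaled
 * to match.
 */
#if 0
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);

	if (bufmgr != NULL) {
		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
		/* ... allocate bos, build and exec batches ... */
		drm_intel_bufmgr_destroy(bufmgr);	/* unref; freed on last user */
	}
#endif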

drm_intel_bo *
bo_create_from_gem_handle(drm_intel_bufmgr *bufmgr,
			  unsigned int size, unsigned int handle)
{
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	int ret;
	struct drm_i915_gem_get_tiling get_tiling;
	drmMMListHead *list;

	/* At the moment most applications only have a few named bo.
	 * For instance, in a DRI client only the render buffers passed
	 * between X and the client are named. And since X returns the
	 * alternating names for the front/back buffer a linear search
	 * provides a sufficiently fast match.
	 */
	for (list = bufmgr_gem->named.next;
	     list != &bufmgr_gem->named;
	     list = list->next) {
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
		if (bo_gem->gem_handle == handle) {
			return &bo_gem->bo;
		}
	}

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
		return NULL;

	bo_gem->bo.size = size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;
	bo_gem->name = NULL;
	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = handle;
	bo_gem->bo.handle = handle;
	bo_gem->global_name = 0;
	bo_gem->reusable = false;

	memclear(get_tiling);
	get_tiling.handle = bo_gem->gem_handle;
	ret = drmIoctl(bufmgr_gem->fd,
		       DRM_IOCTL_I915_GEM_GET_TILING,
		       &get_tiling);
	if (ret != 0) {
		drm_intel_gem_bo_unreference(&bo_gem->bo);
		return NULL;
	}
	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);

	DRMINITLISTHEAD(&bo_gem->vma_list);
	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
	printf("bo_create_from_handle: %d\n", handle);

	return &bo_gem->bo;
}
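
/*
 * Illustrative sketch (not compiled in): wrapping a GEM handle that already
 * exists in the kernel (for example, one obtained from the display driver
 * for the framebuffer) so it can be used through the bufmgr API.  fb_size
 * and fb_handle are hypothetical.
 */
#if 0
	drm_intel_bo *fb_bo = bo_create_from_gem_handle(bufmgr, fb_size,
							fb_handle);
#endif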