/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "sna.h"
#include "sna_reg.h"

/* The bracketed header names were dropped from this listing; <stdlib.h>,
 * <string.h> and <stdio.h> are assumed here to match the malloc/memset/
 * printf usage below. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#endif

#ifdef HAVE_STRUCT_SYSINFO_TOTALRAM
#include <sys/sysinfo.h>
#endif

48 | #include "sna_cpuid.h" |
||
49 | |||
50 | static struct kgem_bo * |
||
51 | search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags); |
||
52 | |||
53 | static struct kgem_bo * |
||
54 | search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags); |
||
55 | |||
56 | #define DBG_NO_HW 0 |
||
57 | #define DBG_NO_TILING 0 |
||
58 | #define DBG_NO_CACHE 0 |
||
59 | #define DBG_NO_CACHE_LEVEL 0 |
||
60 | #define DBG_NO_CPU 0 |
||
61 | #define DBG_NO_CREATE2 1 |
||
62 | #define DBG_NO_USERPTR 0 |
||
63 | #define DBG_NO_UNSYNCHRONIZED_USERPTR 0 |
||
64 | #define DBG_NO_LLC 0 |
||
65 | #define DBG_NO_SEMAPHORES 0 |
||
66 | #define DBG_NO_MADV 1 |
||
67 | #define DBG_NO_UPLOAD_CACHE 0 |
||
68 | #define DBG_NO_UPLOAD_ACTIVE 0 |
||
69 | #define DBG_NO_MAP_UPLOAD 0 |
||
70 | #define DBG_NO_RELAXED_FENCING 0 |
||
71 | #define DBG_NO_SECURE_BATCHES 0 |
||
72 | #define DBG_NO_PINNED_BATCHES 0 |
||
73 | #define DBG_NO_FAST_RELOC 0 |
||
74 | #define DBG_NO_HANDLE_LUT 1 |
||
75 | #define DBG_NO_WT 0 |
||
76 | #define DBG_DUMP 0 |
||
77 | |||
78 | #define FORCE_MMAP_SYNC 0 /* ((1 << DOMAIN_CPU) | (1 << DOMAIN_GTT)) */ |
||
79 | |||
80 | #ifndef DEBUG_SYNC |
||
81 | #define DEBUG_SYNC 0 |
||
82 | #endif |
||
83 | |||
84 | |||
85 | #if 0 |
||
86 | #define ASSERT_IDLE(kgem__, handle__) assert(!__kgem_busy(kgem__, handle__)) |
||
87 | #define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__) assert(!(expect__) || !__kgem_busy(kgem__, handle__)) |
||
88 | #else |
||
89 | #define ASSERT_IDLE(kgem__, handle__) |
||
90 | #define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__) |
||
91 | #endif |
||
92 | |||
/* Worst case seems to be 965gm, where we cannot write within a cacheline
 * that is simultaneously being read by the GPU, or within the sampler
 * prefetch. In general, the chipsets seem to have a requirement that sampler
 * offsets be aligned to a cacheline (64 bytes).
 */
#define UPLOAD_ALIGNMENT 128

#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)

#define MAX_GTT_VMA_CACHE 512
#define MAX_CPU_VMA_CACHE INT16_MAX
#define MAP_PRESERVE_TIME 10

#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)

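/*
 * A note on the map tagging above: mmap pointers are page aligned, so
 * the two low bits of bo->map are free to encode how the mapping was
 * made -- bit 0 marks a CPU-side mapping, bit 1 a userptr mapping, and
 * an untagged pointer is a GTT mapping. The IS_*_MAP()/__MAP_TYPE()
 * tests and the MAP() accessor (in the sna headers) only inspect or
 * strip those tag bits.
 */
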
#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))

#define LOCAL_I915_PARAM_HAS_BLT 11
#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING 12
#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA 15
#define LOCAL_I915_PARAM_HAS_SEMAPHORES 20
#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES 23
#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES 24
#define LOCAL_I915_PARAM_HAS_NO_RELOC 25
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT 26
#define LOCAL_I915_PARAM_HAS_WT 27

#define LOCAL_I915_EXEC_IS_PINNED (1<<10)
#define LOCAL_I915_EXEC_NO_RELOC (1<<11)
#define LOCAL_I915_EXEC_HANDLE_LUT (1<<12)

struct local_i915_gem_userptr {
	uint64_t user_ptr;
	uint64_t user_size;
	uint32_t flags;
#define I915_USERPTR_READ_ONLY (1<<0)
#define I915_USERPTR_UNSYNCHRONIZED (1<<31)
	uint32_t handle;
};

#define UNCACHED 0
#define SNOOPED 1
#define DISPLAY 2

struct local_i915_gem_caching {
	uint32_t handle;
	uint32_t caching;
};

#define LOCAL_IOCTL_I915_GEM_SET_CACHING SRV_I915_GEM_SET_CACHING

struct local_fbinfo {
	int width;
	int height;
	int pitch;
	int tiling;
};

struct kgem_buffer {
	struct kgem_bo base;
	void *mem;
	uint32_t used;
	uint32_t need_io : 1;
	uint32_t write : 2;
	uint32_t mmapped : 1;
};

static struct kgem_bo *__kgem_freed_bo;
static struct kgem_request *__kgem_freed_request;
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;

static inline int bytes(struct kgem_bo *bo)
{
	return __kgem_bo_size(bo);
}

#define bucket(B) (B)->size.pages.bucket
#define num_pages(B) (B)->size.pages.count

#ifdef DEBUG_MEMORY
static void debug_alloc(struct kgem *kgem, size_t size)
{
	kgem->debug_memory.bo_allocs++;
	kgem->debug_memory.bo_bytes += size;
}
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
{
	debug_alloc(kgem, bytes(bo));
}
#else
#define debug_alloc(k, b)
#define debug_alloc__bo(k, b)
#endif

#ifndef NDEBUG
static void assert_tiling(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_get_tiling tiling;

	assert(bo);

	VG_CLEAR(tiling);
	tiling.handle = bo->handle;
	tiling.tiling_mode = -1;
	(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &tiling);
	assert(tiling.tiling_mode == bo->tiling);
}
#else
#define assert_tiling(kgem, bo)
#endif

static void kgem_sna_reset(struct kgem *kgem)
{
	struct sna *sna = container_of(kgem, struct sna, kgem);

	sna->render.reset(sna);
	sna->blt_state.fill_bo = 0;
}

static void kgem_sna_flush(struct kgem *kgem)
{
	struct sna *sna = container_of(kgem, struct sna, kgem);

	sna->render.flush(sna);

//	if (sna->render.solid_cache.dirty)
//		sna_render_flush_solid(sna);
}

static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
{
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (DBG_NO_TILING)
		return false;

	VG_CLEAR(set_tiling);
	do {
		set_tiling.handle = handle;
		set_tiling.tiling_mode = tiling;
		set_tiling.stride = stride;

		ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
	} while (ret != 0);
	return ret == 0;
}

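/*
 * Note that the set-tiling loop above reissues the ioctl until it
 * returns 0, so the `return ret == 0` is always true on exit; a
 * persistent (rather than transient) kernel error would spin here
 * instead of failing.
 */
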
static bool gem_set_caching(int fd, uint32_t handle, int caching)
{
	struct local_i915_gem_caching arg;

	VG_CLEAR(arg);
	arg.handle = handle;
	arg.caching = caching;
	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHING, &arg) == 0;
}

static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
{
	if (flags & CREATE_NO_RETIRE) {
		DBG(("%s: not retiring per-request\n", __FUNCTION__));
		return false;
	}

	if (!kgem->need_retire) {
		DBG(("%s: nothing to retire\n", __FUNCTION__));
		return false;
	}

	if (kgem_retire(kgem))
		return true;

	if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
		DBG(("%s: not throttling\n", __FUNCTION__));
		return false;
	}

	kgem_throttle(kgem);
	return kgem_retire(kgem);
}

static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap_gtt mmap_arg;
	void *ptr;

	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
	     bo->handle, bytes(bo)));
	assert(bo->proxy == NULL);
	assert(!bo->snoop);
	assert(kgem_bo_can_map(kgem, bo));

retry_gtt:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
		(void)__kgem_throttle_retire(kgem, 0);
		if (kgem_expire_cache(kgem))
			goto retry_gtt;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry_gtt;
		}

		printf("%s: failed to retrieve GTT offset for handle=%d\n",
		       __FUNCTION__, bo->handle);
		return NULL;
	}

	ptr = (void *)(int)mmap_arg.offset;
	if (ptr == NULL)
		ErrorF("%s: failed to mmap handle=%d, %d bytes, into GTT domain\n",
		       __FUNCTION__, bo->handle, bytes(bo));

	return ptr;
}

static int __gem_write(int fd, uint32_t handle,
		       int offset, int length,
		       const void *src)
{
	struct drm_i915_gem_pwrite pwrite;

	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
	     handle, offset, length));

	VG_CLEAR(pwrite);
	pwrite.handle = handle;
	pwrite.offset = offset;
	pwrite.size = length;
	pwrite.data_ptr = (uintptr_t)src;
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}

static int gem_write(int fd, uint32_t handle,
		     int offset, int length,
		     const void *src)
{
	struct drm_i915_gem_pwrite pwrite;

	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
	     handle, offset, length));

	VG_CLEAR(pwrite);
	pwrite.handle = handle;
	/* align the transfer to cachelines; fortuitously this is safe! */
	if ((offset | length) & 63) {
		pwrite.offset = offset & ~63;
		pwrite.size = ALIGN(offset+length, 64) - pwrite.offset;
		pwrite.data_ptr = (uintptr_t)src + pwrite.offset - offset;
	} else {
		pwrite.offset = offset;
		pwrite.size = length;
		pwrite.data_ptr = (uintptr_t)src;
	}
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}

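/*
 * Worked example of the rounding in gem_write(): a 100-byte write at
 * offset 30 becomes pwrite.offset = 0 and pwrite.size = ALIGN(130, 64)
 * = 192, with data_ptr wound back by 30 bytes so the kernel's copy
 * stays in step with the caller's buffer -- per the comment above,
 * reading slightly outside [src, src+length) is safe for the upload
 * buffers this is used on.
 */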

bool __kgem_busy(struct kgem *kgem, int handle)
{
	struct drm_i915_gem_busy busy;

	VG_CLEAR(busy);
	busy.handle = handle;
	busy.busy = !kgem->wedged;
	(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	DBG(("%s: handle=%d, busy=%d, wedged=%d\n",
	     __FUNCTION__, handle, busy.busy, kgem->wedged));

	return busy.busy;
}

static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
	     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
	     __kgem_busy(kgem, bo->handle)));
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->vma));

	if (bo->rq) {
		if (!__kgem_busy(kgem, bo->handle)) {
			__kgem_bo_clear_busy(bo);
			kgem_retire(kgem);
		}
	} else {
		assert(!bo->needs_flush);
		ASSERT_IDLE(kgem, bo->handle);
	}
}

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length)
{
	assert(bo->refcnt);
	assert(!bo->purged);
	assert(bo->proxy == NULL);
	ASSERT_IDLE(kgem, bo->handle);

	assert(length <= bytes(bo));
	if (gem_write(kgem->fd, bo->handle, 0, length, data))
		return false;

	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
	if (bo->exec == NULL) {
		kgem_bo_retire(kgem, bo);
		bo->domain = DOMAIN_NONE;
	}
	bo->gtt_dirty = true;
	return true;
}

static uint32_t gem_create(int fd, int num_pages)
{
	struct drm_i915_gem_create create;

	VG_CLEAR(create);
	create.handle = 0;
	create.size = PAGE_SIZE * num_pages;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

	return create.handle;
}

static bool
kgem_bo_set_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	assert(bo->exec == NULL);
	assert(!bo->purged);

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_DONTNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
		bo->purged = 1;
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
		return madv.retained;
	}

	return true;
#endif
}

static bool
kgem_bo_is_retained(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	if (!bo->purged)
		return true;

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_DONTNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0)
		return madv.retained;

	return false;
#endif
}

static bool
kgem_bo_clear_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	assert(bo->purged);

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_WILLNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
		bo->purged = !madv.retained;
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
		return madv.retained;
	}

	return false;
#endif
}

static void gem_close(int fd, uint32_t handle)
{
	struct drm_gem_close close;

	VG_CLEAR(close);
	close.handle = handle;
	(void)drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
}

constant inline static unsigned long __fls(unsigned long word)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
#else
	unsigned int v = 0;

	while (word >>= 1)
		v++;

	return v;
#endif
}

constant inline static int cache_bucket(int num_pages)
{
	return __fls(num_pages);
}

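/*
 * The bo caches below are bucketed by floor(log2(num_pages)): bucket 0
 * holds 1-page bos, bucket 1 holds 2-3 pages, bucket 2 holds 4-7, and
 * so on. __fls() ("find last set") is the index of the highest set
 * bit, i.e. bsr on x86.
 */
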
static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
				      int handle, int num_pages)
{
	assert(num_pages);
	memset(bo, 0, sizeof(*bo));

	bo->refcnt = 1;
	bo->handle = handle;
	bo->target_handle = -1;
	num_pages(bo) = num_pages;
	bucket(bo) = cache_bucket(num_pages);
	bo->reusable = true;
	bo->domain = DOMAIN_CPU;
	list_init(&bo->request);
	list_init(&bo->list);
	list_init(&bo->vma);

	return bo;
}

static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
{
	struct kgem_bo *bo;

	if (__kgem_freed_bo) {
		bo = __kgem_freed_bo;
		__kgem_freed_bo = *(struct kgem_bo **)bo;
	} else {
		bo = malloc(sizeof(*bo));
		if (bo == NULL)
			return NULL;
	}

	return __kgem_bo_init(bo, handle, num_pages);
}

static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
{
	struct kgem_request *rq;

	rq = __kgem_freed_request;
	if (rq) {
		__kgem_freed_request = *(struct kgem_request **)rq;
	} else {
		rq = malloc(sizeof(*rq));
		if (rq == NULL)
			rq = &kgem->static_request;
	}

	list_init(&rq->buffers);
	rq->bo = NULL;
	rq->ring = 0;

	return rq;
}

static void __kgem_request_free(struct kgem_request *rq)
{
	_list_del(&rq->list);
	*(struct kgem_request **)rq = __kgem_freed_request;
	__kgem_freed_request = rq;
}

static struct list *inactive(struct kgem *kgem, int num_pages)
{
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
	return &kgem->inactive[cache_bucket(num_pages)];
}

static struct list *active(struct kgem *kgem, int num_pages, int tiling)
{
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
	return &kgem->active[cache_bucket(num_pages)][tiling];
}

static size_t
agp_aperture_size(struct pci_device *dev, unsigned gen)
{
	/* XXX assume that only future chipsets are unknown and follow
	 * the post gen2 PCI layout.
	 */
	return 0;
}

static size_t
total_ram_size(void)
{
	uint32_t data[9];
	size_t size = 0;

	asm volatile("int $0x40"
		     : "=a" (size)
		     : "a" (18), "b" (20), "c" (data)
		     : "memory");

	return size != -1 ? size : 0;
}

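/*
 * total_ram_size() goes through int $0x40, the KolibriOS system-call
 * gate: eax=18 selects the system-services function and ebx=20 its
 * memory-information subfunction, with ecx pointing at a scratch
 * buffer for the returned status block; judging by the -1 check, eax
 * comes back as the RAM size, or -1 on error.
 */
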
static unsigned
cpu_cache_size__cpuid4(void)
{
	/* Deterministic Cache Parameters (Function 04h):
	 * When EAX is initialized to a value of 4, the CPUID instruction
	 * returns deterministic cache information in the EAX, EBX, ECX
	 * and EDX registers. This function requires ECX be initialized
	 * with an index which indicates which cache to return information
	 * about. The OS is expected to call this function (CPUID.4) with
	 * ECX = 0, 1, 2, until EAX[4:0] == 0, indicating no more caches.
	 * The order in which the caches are returned is not specified
	 * and may change at Intel's discretion.
	 *
	 * Calculating the Cache Size in bytes:
	 *   = (Ways + 1) * (Partitions + 1) * (Line_Size + 1) * (Sets + 1)
	 */

	unsigned int eax, ebx, ecx, edx;
	unsigned int llc_size = 0;
	int cnt = 0;

	if (__get_cpuid_max(BASIC_CPUID, NULL) < 4)
		return 0;

	do {
		unsigned associativity, line_partitions, line_size, sets;

		__cpuid_count(4, cnt++, eax, ebx, ecx, edx);

		if ((eax & 0x1f) == 0)
			break;

		associativity = ((ebx >> 22) & 0x3ff) + 1;
		line_partitions = ((ebx >> 12) & 0x3ff) + 1;
		line_size = (ebx & 0xfff) + 1;
		sets = ecx + 1;

		llc_size = associativity * line_partitions * line_size * sets;
	} while (1);

	return llc_size;
}

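/*
 * For instance, a 16-way cache with one partition, 64-byte lines and
 * 8192 sets reports as 16 * 1 * 64 * 8192 = 8388608 bytes (8 MiB);
 * since the loop overwrites llc_size on every leaf, the value returned
 * is the size of the last cache level enumerated (in practice the LLC).
 */
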
static int gem_param(struct kgem *kgem, int name)
{
	drm_i915_getparam_t gp;
	int v = -1; /* No param uses the sign bit, reserve it for errors */

	VG_CLEAR(gp);
	gp.param = name;
	gp.value = &v;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
	return v;
}

static bool test_has_execbuffer2(struct kgem *kgem)
{
	return 1;
}

static bool test_has_no_reloc(struct kgem *kgem)
{
	if (DBG_NO_FAST_RELOC)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
}

static bool test_has_handle_lut(struct kgem *kgem)
{
	if (DBG_NO_HANDLE_LUT)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
}

static bool test_has_wt(struct kgem *kgem)
{
	if (DBG_NO_WT)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_WT) > 0;
}

static bool test_has_semaphores_enabled(struct kgem *kgem)
{
	bool detected = false;
	int ret;

	if (DBG_NO_SEMAPHORES)
		return false;

	ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
	if (ret != -1)
		return ret > 0;

	return detected;
}

static bool __kgem_throttle(struct kgem *kgem)
{
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
		return false;

	return errno == EIO;
}

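/*
 * A note on the octal generation constants used from here on:
 * kgem->gen packs the chipset generation as (major << 3) | minor, so
 * 040 is gen4, 060 is gen6, and a test like (gen >> 3) == 7 matches
 * any gen7 variant.
 */
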
static bool is_hw_supported(struct kgem *kgem,
			    struct pci_device *dev)
{
	if (DBG_NO_HW)
		return false;

	if (!test_has_execbuffer2(kgem))
		return false;

	if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
		return kgem->has_blt;

	/* Although the GMCH is fubar on pre-855gm, it mostly works. So
	 * let the user decide through "NoAccel" whether or not to risk
	 * hw acceleration.
	 */

	if (kgem->gen == 060 && dev->revision < 8) {
		/* pre-production SNB with dysfunctional BLT */
		return false;
	}

	if (kgem->gen >= 060) /* Only if the kernel supports the BLT ring */
		return kgem->has_blt;

	return true;
}

static bool test_has_relaxed_fencing(struct kgem *kgem)
{
	if (kgem->gen < 040) {
		if (DBG_NO_RELAXED_FENCING)
			return false;

		return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0;
	} else
		return true;
}

static bool test_has_llc(struct kgem *kgem)
{
	int has_llc = -1;

	if (DBG_NO_LLC)
		return false;

#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
	has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
#endif
	if (has_llc == -1) {
		DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
		has_llc = kgem->gen >= 060;
	}

	return has_llc;
}

static bool test_has_caching(struct kgem *kgem)
{
	uint32_t handle;
	bool ret;

	if (DBG_NO_CACHE_LEVEL)
		return false;

	/* Incoherent blt and sampler hangs the GPU */
	if (kgem->gen == 040)
		return false;

	handle = gem_create(kgem->fd, 1);
	if (handle == 0)
		return false;

	ret = gem_set_caching(kgem->fd, handle, UNCACHED);
	gem_close(kgem->fd, handle);
	return ret;
}

static bool test_has_userptr(struct kgem *kgem)
{
#if defined(USE_USERPTR)
	uint32_t handle;
	void *ptr;

	if (DBG_NO_USERPTR)
		return false;

	/* Incoherent blt and sampler hangs the GPU */
	if (kgem->gen == 040)
		return false;

	if (posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE))
		return false;

	handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
	gem_close(kgem->fd, handle);
	free(ptr);

	return handle != 0;
#else
	return false;
#endif
}

static bool test_has_create2(struct kgem *kgem)
{
#if defined(USE_CREATE2)
	struct local_i915_gem_create2 args;

	if (DBG_NO_CREATE2)
		return false;

	memset(&args, 0, sizeof(args));
	args.size = PAGE_SIZE;
	args.caching = DISPLAY;
	if (drmIoctl(kgem->fd, LOCAL_IOCTL_I915_GEM_CREATE2, &args) == 0)
		gem_close(kgem->fd, args.handle);

	return args.handle != 0;
#else
	return false;
#endif
}

static bool test_has_secure_batches(struct kgem *kgem)
{
	if (DBG_NO_SECURE_BATCHES)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
}

static bool test_has_pinned_batches(struct kgem *kgem)
{
	if (DBG_NO_PINNED_BATCHES)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
}

static bool kgem_init_pinned_batches(struct kgem *kgem)
{
	int count[2] = { 2, 1 };
	int size[2] = { 1, 2 };
	int n, i;

	if (kgem->wedged)
		return true;

	for (n = 0; n < ARRAY_SIZE(count); n++) {
		for (i = 0; i < count[n]; i++) {
			struct drm_i915_gem_pin pin;
			struct kgem_bo *bo;

			VG_CLEAR(pin);

			pin.handle = gem_create(kgem->fd, size[n]);
			if (pin.handle == 0)
				goto err;

			DBG(("%s: new handle=%d, num_pages=%d\n",
			     __FUNCTION__, pin.handle, size[n]));

			bo = __kgem_bo_alloc(pin.handle, size[n]);
			if (bo == NULL) {
				gem_close(kgem->fd, pin.handle);
				goto err;
			}

			pin.alignment = 0;
			if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
				gem_close(kgem->fd, pin.handle);
				goto err;
			}
			bo->presumed_offset = pin.offset;
			debug_alloc__bo(kgem, bo);
			list_add(&bo->list, &kgem->pinned_batches[n]);
		}
	}

	return true;

err:
	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
		while (!list_is_empty(&kgem->pinned_batches[n])) {
			kgem_bo_destroy(kgem,
					list_first_entry(&kgem->pinned_batches[n],
							 struct kgem_bo, list));
		}
	}

	/* For simplicity populate the lists with a single unpinned bo */
	for (n = 0; n < ARRAY_SIZE(count); n++) {
		struct kgem_bo *bo;
		uint32_t handle;

		handle = gem_create(kgem->fd, size[n]);
		if (handle == 0)
			break;

		bo = __kgem_bo_alloc(handle, size[n]);
		if (bo == NULL) {
			gem_close(kgem->fd, handle);
			break;
		}

		debug_alloc__bo(kgem, bo);
		list_add(&bo->list, &kgem->pinned_batches[n]);
	}
	return false;
}

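/*
 * The function above pre-pins a small pool of batch buffers -- two
 * one-page bos and one two-page bo -- so that their GTT offsets stay
 * fixed for the lifetime of the driver; on failure it falls back to
 * ordinary unpinned bos and returns false, leaving the caller to
 * decide whether that is fatal (see the gen2 check in kgem_init()).
 */
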
void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{
	struct drm_i915_gem_get_aperture aperture;
	size_t totalram;
	unsigned half_gpu_max;
	unsigned int i, j;

	DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));

	memset(kgem, 0, sizeof(*kgem));

	kgem->fd = fd;
	kgem->gen = gen;

	list_init(&kgem->requests[0]);
	list_init(&kgem->requests[1]);
	list_init(&kgem->batch_buffers);
	list_init(&kgem->active_buffers);
	list_init(&kgem->flushing);
	list_init(&kgem->large);
	list_init(&kgem->large_inactive);
	list_init(&kgem->snoop);
	list_init(&kgem->scanout);
	for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++)
		list_init(&kgem->pinned_batches[i]);
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
		list_init(&kgem->inactive[i]);
	for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
		for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++)
			list_init(&kgem->active[i][j]);
	}
	for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) {
		for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
			list_init(&kgem->vma[i].inactive[j]);
	}
	kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
	kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;

	kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
	DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
	     kgem->has_blt));

	kgem->has_relaxed_delta =
		gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
	     kgem->has_relaxed_delta));

	kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
	     kgem->has_relaxed_fencing));

	kgem->has_llc = test_has_llc(kgem);
	DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
	     kgem->has_llc));

	kgem->has_wt = test_has_wt(kgem);
	DBG(("%s: has write-through caching for scanouts? %d\n", __FUNCTION__,
	     kgem->has_wt));

	kgem->has_caching = test_has_caching(kgem);
	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
	     kgem->has_caching));

	kgem->has_userptr = test_has_userptr(kgem);
	DBG(("%s: has userptr? %d\n", __FUNCTION__,
	     kgem->has_userptr));

	kgem->has_create2 = test_has_create2(kgem);
	kgem->has_create2 = 0;
	DBG(("%s: has create2? %d\n", __FUNCTION__,
	     kgem->has_create2));

	kgem->has_no_reloc = test_has_no_reloc(kgem);
	DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
	     kgem->has_no_reloc));

	kgem->has_handle_lut = test_has_handle_lut(kgem);
	kgem->has_handle_lut = 0;
	DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
	     kgem->has_handle_lut));

	kgem->has_semaphores = false;
	if (kgem->has_blt && test_has_semaphores_enabled(kgem))
		kgem->has_semaphores = true;
	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
	     kgem->has_semaphores));

	kgem->can_blt_cpu = gen >= 030;
	DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
	     kgem->can_blt_cpu));

	kgem->has_secure_batches = test_has_secure_batches(kgem);
	DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
	     kgem->has_secure_batches));

	kgem->has_pinned_batches = test_has_pinned_batches(kgem);
	DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
	     kgem->has_pinned_batches));

	if (!is_hw_supported(kgem, dev)) {
		printf("Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
		kgem->wedged = 1;
	} else if (__kgem_throttle(kgem)) {
		printf("Detected a hung GPU, disabling acceleration.\n");
		kgem->wedged = 1;
	}

	kgem->batch_size = ARRAY_SIZE(kgem->batch);
	if (gen == 020 && !kgem->has_pinned_batches)
		/* Limited to what we can pin */
		kgem->batch_size = 4*1024;
	if (gen == 022)
		/* 865g cannot handle a batch spanning multiple pages */
		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
	if ((gen >> 3) == 7)
		kgem->batch_size = 16*1024;
	if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
		kgem->batch_size = 4*1024;

	if (!kgem_init_pinned_batches(kgem) && gen == 020) {
		printf("Unable to reserve memory for GPU, disabling acceleration.\n");
		kgem->wedged = 1;
	}

	DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
	     kgem->batch_size));

	kgem->min_alignment = 4;
	if (gen < 040)
		kgem->min_alignment = 64;

	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
	DBG(("%s: last-level cache size: %d bytes, threshold in pages: %d\n",
	     __FUNCTION__, cpu_cache_size(), kgem->half_cpu_cache_pages));

	kgem->next_request = __kgem_request_alloc(kgem);

	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
	     !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_caching),
	     kgem->has_llc, kgem->has_caching, kgem->has_userptr));

	VG_CLEAR(aperture);
	aperture.aper_size = 0;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
	if (aperture.aper_size == 0)
		aperture.aper_size = 64*1024*1024;

	DBG(("%s: aperture size %lld, available now %lld\n",
	     __FUNCTION__,
	     (long long)aperture.aper_size,
	     (long long)aperture.aper_available_size));

	kgem->aperture_total = aperture.aper_size;
	kgem->aperture_high = aperture.aper_size * 3/4;
	kgem->aperture_low = aperture.aper_size * 1/3;
	if (gen < 033) {
		/* Severe alignment penalties */
		kgem->aperture_high /= 2;
		kgem->aperture_low /= 2;
	}
	DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
	     kgem->aperture_low, kgem->aperture_low / (1024*1024),
	     kgem->aperture_high, kgem->aperture_high / (1024*1024)));

	kgem->aperture_mappable = agp_aperture_size(dev, gen);
	if (kgem->aperture_mappable == 0 ||
	    kgem->aperture_mappable > aperture.aper_size)
		kgem->aperture_mappable = aperture.aper_size;
	DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
	     kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));

	kgem->buffer_size = 64 * 1024;
	while (kgem->buffer_size < kgem->aperture_mappable >> 10)
		kgem->buffer_size *= 2;
	if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
		kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
	kgem->buffer_size = 1 << __fls(kgem->buffer_size);
	DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
	     kgem->buffer_size, kgem->buffer_size / 1024));
	assert(kgem->buffer_size);

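	/*
	 * Example of the sizing above: with a 256 MiB mappable aperture,
	 * buffer_size doubles from 64 KiB until it reaches
	 * aperture_mappable >> 10 = 256 KiB; the cache clamp then caps it
	 * at half the last-level cache, and the final __fls() rounds it
	 * down to a power of two.
	 */
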
	kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
	kgem->max_gpu_size = kgem->max_object_size;
	if (!kgem->has_llc && kgem->max_gpu_size > MAX_CACHE_SIZE)
		kgem->max_gpu_size = MAX_CACHE_SIZE;

	totalram = total_ram_size();
	if (totalram == 0) {
		DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
		     __FUNCTION__));
		totalram = kgem->aperture_total;
	}
	DBG(("%s: total ram=%ld\n", __FUNCTION__, (long)totalram));
	if (kgem->max_object_size > totalram / 2)
		kgem->max_object_size = totalram / 2;
	if (kgem->max_gpu_size > totalram / 4)
		kgem->max_gpu_size = totalram / 4;

	kgem->max_cpu_size = kgem->max_object_size;

	half_gpu_max = kgem->max_gpu_size / 2;
	kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
	if (kgem->max_copy_tile_size > half_gpu_max)
		kgem->max_copy_tile_size = half_gpu_max;

	if (kgem->has_llc)
		kgem->max_upload_tile_size = kgem->max_copy_tile_size;
	else
		kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
	if (kgem->max_upload_tile_size > half_gpu_max)
		kgem->max_upload_tile_size = half_gpu_max;
	if (kgem->max_upload_tile_size > kgem->aperture_high/2)
		kgem->max_upload_tile_size = kgem->aperture_high/2;
	if (kgem->max_upload_tile_size > kgem->aperture_low)
		kgem->max_upload_tile_size = kgem->aperture_low;
	if (kgem->max_upload_tile_size < 16*PAGE_SIZE)
		kgem->max_upload_tile_size = 16*PAGE_SIZE;

	kgem->large_object_size = MAX_CACHE_SIZE;
	if (kgem->large_object_size > half_gpu_max)
		kgem->large_object_size = half_gpu_max;
	if (kgem->max_copy_tile_size > kgem->aperture_high/2)
		kgem->max_copy_tile_size = kgem->aperture_high/2;
	if (kgem->max_copy_tile_size > kgem->aperture_low)
		kgem->max_copy_tile_size = kgem->aperture_low;
	if (kgem->max_copy_tile_size < 16*PAGE_SIZE)
		kgem->max_copy_tile_size = 16*PAGE_SIZE;

	if (kgem->has_llc | kgem->has_caching | kgem->has_userptr) {
		if (kgem->large_object_size > kgem->max_cpu_size)
			kgem->large_object_size = kgem->max_cpu_size;
	} else
		kgem->max_cpu_size = 0;
	if (DBG_NO_CPU)
		kgem->max_cpu_size = 0;

	DBG(("%s: maximum object size=%d\n",
	     __FUNCTION__, kgem->max_object_size));
	DBG(("%s: large object threshold=%d\n",
	     __FUNCTION__, kgem->large_object_size));
	DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
	     __FUNCTION__,
	     kgem->max_gpu_size, kgem->max_cpu_size,
	     kgem->max_upload_tile_size, kgem->max_copy_tile_size));

	/* Convert the aperture thresholds to pages */
	kgem->aperture_low /= PAGE_SIZE;
	kgem->aperture_high /= PAGE_SIZE;

	kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
	if ((int)kgem->fence_max < 0)
		kgem->fence_max = 5; /* minimum safe value for all hw */
	DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));

	kgem->batch_flags_base = 0;
	if (kgem->has_no_reloc)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
	if (kgem->has_handle_lut)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
	if (kgem->has_pinned_batches)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;
}

/* XXX hopefully a good approximation */
uint32_t kgem_get_unique_id(struct kgem *kgem)
{
	uint32_t id;
	id = ++kgem->unique_id;
	if (id == 0)
		id = ++kgem->unique_id;
	return id;
}

inline static uint32_t kgem_pitch_alignment(struct kgem *kgem, unsigned flags)
{
	if (flags & CREATE_PRIME)
		return 256;
	if (flags & CREATE_SCANOUT)
		return 64;
	return kgem->min_alignment;
}

void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size)
{
	if (kgem->gen <= 030) {
		if (tiling) {
			if (kgem->gen < 030) {
				*tile_width = 128;
				*tile_height = 16;
				*tile_size = 2048;
			} else {
				*tile_width = 512;
				*tile_height = 8;
				*tile_size = 4096;
			}
		} else {
			*tile_width = 1;
			*tile_height = 1;
			*tile_size = 1;
		}
	} else switch (tiling) {
	default:
	case I915_TILING_NONE:
		*tile_width = 1;
		*tile_height = 1;
		*tile_size = 1;
		break;
	case I915_TILING_X:
		*tile_width = 512;
		*tile_height = 8;
		*tile_size = 4096;
		break;
	case I915_TILING_Y:
		*tile_width = 128;
		*tile_height = 32;
		*tile_size = 4096;
		break;
	}
}

uint32_t kgem_surface_size(struct kgem *kgem,
			   bool relaxed_fencing,
			   unsigned flags,
			   uint32_t width,
			   uint32_t height,
			   uint32_t bpp,
			   uint32_t tiling,
			   uint32_t *pitch)
{
	uint32_t tile_width, tile_height;
	uint32_t size;

	assert(width <= MAXSHORT);
	assert(height <= MAXSHORT);
	assert(bpp >= 8);

	if (kgem->gen <= 030) {
		if (tiling) {
			if (kgem->gen < 030) {
				tile_width = 128;
				tile_height = 32;
			} else {
				tile_width = 512;
				tile_height = 16;
			}
		} else {
			tile_width = 2 * bpp >> 3;
			tile_width = ALIGN(tile_width,
					   kgem_pitch_alignment(kgem, flags));
			tile_height = 2;
		}
	} else switch (tiling) {
	default:
	case I915_TILING_NONE:
		tile_width = 2 * bpp >> 3;
		tile_width = ALIGN(tile_width,
				   kgem_pitch_alignment(kgem, flags));
		tile_height = 2;
		break;

		/* XXX align to an even tile row */
	case I915_TILING_X:
		tile_width = 512;
		tile_height = 16;
		break;
	case I915_TILING_Y:
		tile_width = 128;
		tile_height = 64;
		break;
	}

	*pitch = ALIGN(width * bpp / 8, tile_width);
	height = ALIGN(height, tile_height);
	if (kgem->gen >= 040)
		return PAGE_ALIGN(*pitch * height);

	/* If it is too wide for the blitter, don't even bother. */
	if (tiling != I915_TILING_NONE) {
		if (*pitch > 8192)
			return 0;

		for (size = tile_width; size < *pitch; size <<= 1)
			;
		*pitch = size;
	} else {
		if (*pitch >= 32768)
			return 0;
	}

	size = *pitch * height;
	if (relaxed_fencing || tiling == I915_TILING_NONE)
		return PAGE_ALIGN(size);

	/* We need to allocate a pot fence region for a tiled buffer. */
	if (kgem->gen < 030)
		tile_width = 512 * 1024;
	else
		tile_width = 1024 * 1024;
	while (tile_width < size)
		tile_width *= 2;
	return tile_width;
}

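/*
 * Worked example for kgem_surface_size() on gen >= 040: a 1920x1080,
 * 32bpp, X-tiled surface gets pitch = ALIGN(1920*4, 512) = 7680 and
 * height = ALIGN(1080, 16) = 1088, so the bo size is
 * PAGE_ALIGN(7680 * 1088) = 8355840 bytes (2040 pages). Note the
 * doubled tile heights compared to kgem_get_tile_size() -- the
 * surface is rounded up to an even number of tile rows.
 */
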
static uint32_t kgem_aligned_height(struct kgem *kgem,
				    uint32_t height, uint32_t tiling)
{
	uint32_t tile_height;

	if (kgem->gen <= 030) {
		tile_height = tiling ? kgem->gen < 030 ? 32 : 16 : 1;
	} else switch (tiling) {
		/* XXX align to an even tile row */
	default:
	case I915_TILING_NONE:
		tile_height = 1;
		break;
	case I915_TILING_X:
		tile_height = 16;
		break;
	case I915_TILING_Y:
		tile_height = 64;
		break;
	}

	return ALIGN(height, tile_height);
}

static struct drm_i915_gem_exec_object2 *
kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_exec_object2 *exec;

	DBG(("%s: handle=%d, index=%d\n",
	     __FUNCTION__, bo->handle, kgem->nexec));

	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
	bo->target_handle = kgem->has_handle_lut ? kgem->nexec : bo->handle;
	exec = memset(&kgem->exec[kgem->nexec++], 0, sizeof(*exec));
	exec->handle = bo->handle;
	exec->offset = bo->presumed_offset;

	kgem->aperture += num_pages(bo);

	return exec;
}

static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
	bo->exec = kgem_add_handle(kgem, bo);
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);

	list_move_tail(&bo->request, &kgem->next_request->buffers);

	/* XXX is it worth working around gcc here? */
	kgem->flush |= bo->flush;
}

static uint32_t kgem_end_batch(struct kgem *kgem)
{
	kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
	if (kgem->nbatch & 1)
		kgem->batch[kgem->nbatch++] = MI_NOOP;

	return kgem->nbatch;
}

static void kgem_fixup_self_relocs(struct kgem *kgem, struct kgem_bo *bo)
{
	int n;

	assert(kgem->nreloc__self <= 256);
	if (kgem->nreloc__self == 0)
		return;

	for (n = 0; n < kgem->nreloc__self; n++) {
		int i = kgem->reloc__self[n];
		assert(kgem->reloc[i].target_handle == ~0U);
		kgem->reloc[i].target_handle = bo->target_handle;
		kgem->reloc[i].presumed_offset = bo->presumed_offset;
		kgem->batch[kgem->reloc[i].offset/sizeof(kgem->batch[0])] =
			kgem->reloc[i].delta + bo->presumed_offset;
	}

	if (n == 256) {
		for (n = kgem->reloc__self[255]; n < kgem->nreloc; n++) {
			if (kgem->reloc[n].target_handle == ~0U) {
				kgem->reloc[n].target_handle = bo->target_handle;
				kgem->reloc[n].presumed_offset = bo->presumed_offset;
				kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
					kgem->reloc[n].delta + bo->presumed_offset;
			}
		}
	}
}

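/*
 * "Self" relocations are entries that point back into the batch buffer
 * itself, so their target is only known once the batch bo exists. The
 * first 256 are recorded individually in reloc__self[]; if that table
 * overflowed, the tail of the reloc list is rescanned for the ~0
 * placeholder target handles and patched the same way.
 */
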
1451 | static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo) |
||
1452 | { |
||
1453 | struct kgem_bo_binding *b; |
||
1454 | |||
1455 | b = bo->binding.next; |
||
1456 | while (b) { |
||
1457 | struct kgem_bo_binding *next = b->next; |
||
1458 | free (b); |
||
1459 | b = next; |
||
1460 | } |
||
1461 | } |
||
1462 | |||
1463 | static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo) |
||
1464 | { |
||
1465 | int type = IS_CPU_MAP(bo->map); |
||
1466 | |||
1467 | assert(!IS_USER_MAP(bo->map)); |
||
1468 | |||
1469 | DBG(("%s: releasing %s vma for handle=%d, count=%d\n", |
||
1470 | __FUNCTION__, type ? "CPU" : "GTT", |
||
1471 | bo->handle, kgem->vma[type].count)); |
||
1472 | |||
1473 | VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo))); |
||
1474 | user_free(MAP(bo->map)); |
||
1475 | bo->map = NULL; |
||
1476 | |||
1477 | if (!list_is_empty(&bo->vma)) { |
||
1478 | list_del(&bo->vma); |
||
1479 | kgem->vma[type].count--; |
||
1480 | } |
||
1481 | } |
||
1482 | |||
1483 | static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo) |
||
1484 | { |
||
1485 | DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle)); |
||
1486 | assert(bo->refcnt == 0); |
||
1487 | assert(bo->proxy == NULL); |
||
1488 | assert(bo->exec == NULL); |
||
1489 | assert(!bo->snoop || bo->rq == NULL); |
||
1490 | |||
1491 | #ifdef DEBUG_MEMORY |
||
1492 | kgem->debug_memory.bo_allocs--; |
||
1493 | kgem->debug_memory.bo_bytes -= bytes(bo); |
||
1494 | #endif |
||
1495 | |||
1496 | kgem_bo_binding_free(kgem, bo); |
||
1497 | |||
1498 | if (IS_USER_MAP(bo->map)) { |
||
1499 | assert(bo->rq == NULL); |
||
1500 | assert(!__kgem_busy(kgem, bo->handle)); |
||
1501 | assert(MAP(bo->map) != bo || bo->io || bo->flush); |
||
1502 | if (!(bo->io || bo->flush)) { |
||
1503 | DBG(("%s: freeing snooped base\n", __FUNCTION__)); |
||
1504 | assert(bo != MAP(bo->map)); |
||
1505 | free(MAP(bo->map)); |
||
1506 | } |
||
1507 | bo->map = NULL; |
||
1508 | } |
||
1509 | if (bo->map) |
||
1510 | kgem_bo_release_map(kgem, bo); |
||
1511 | assert(list_is_empty(&bo->vma)); |
||
1512 | assert(bo->map == NULL); |
||
1513 | |||
1514 | _list_del(&bo->list); |
||
1515 | _list_del(&bo->request); |
||
1516 | gem_close(kgem->fd, bo->handle); |
||
1517 | |||
1518 | if (!bo->io) { |
||
1519 | *(struct kgem_bo **)bo = __kgem_freed_bo; |
||
1520 | __kgem_freed_bo = bo; |
||
1521 | } else |
||
1522 | free(bo); |
||
1523 | } |
||
1524 | |||
1525 | inline static void kgem_bo_move_to_inactive(struct kgem *kgem, |
||
1526 | struct kgem_bo *bo) |
||
1527 | { |
||
1528 | DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle)); |
||
1529 | |||
1530 | assert(bo->refcnt == 0); |
||
1531 | assert(bo->reusable); |
||
1532 | assert(bo->rq == NULL); |
||
1533 | assert(bo->exec == NULL); |
||
1534 | assert(bo->domain != DOMAIN_GPU); |
||
1535 | assert(!bo->proxy); |
||
1536 | assert(!bo->io); |
||
1537 | assert(!bo->scanout); |
||
1538 | assert(!bo->snoop); |
||
1539 | assert(!bo->flush); |
||
1540 | assert(!bo->needs_flush); |
||
1541 | assert(list_is_empty(&bo->vma)); |
||
1542 | assert_tiling(kgem, bo); |
||
1543 | ASSERT_IDLE(kgem, bo->handle); |
||
1544 | |||
1545 | kgem->need_expire = true; |
||
1546 | |||
1547 | if (bucket(bo) >= NUM_CACHE_BUCKETS) { |
||
1548 | list_move(&bo->list, &kgem->large_inactive); |
||
1549 | return; |
||
1550 | } |
||
1551 | |||
1552 | assert(bo->flush == false); |
||
1553 | list_move(&bo->list, &kgem->inactive[bucket(bo)]); |
||
1554 | if (bo->map) { |
||
1555 | int type = IS_CPU_MAP(bo->map); |
||
1556 | if (bucket(bo) >= NUM_CACHE_BUCKETS || |
||
1557 | (!type && !__kgem_bo_is_mappable(kgem, bo))) { |
||
1558 | // munmap(MAP(bo->map), bytes(bo)); |
||
1559 | bo->map = NULL; |
||
1560 | } |
||
1561 | if (bo->map) { |
||
1562 | list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]); |
||
1563 | kgem->vma[type].count++; |
||
1564 | } |
||
1565 | } |
||
1566 | } |
||
1567 | |||
1568 | static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo) |
||
1569 | { |
||
1570 | struct kgem_bo *base; |
||
1571 | |||
1572 | if (!bo->io) |
||
1573 | return bo; |
||
1574 | |||
1575 | assert(!bo->snoop); |
||
1576 | base = malloc(sizeof(*base)); |
||
1577 | if (base) { |
||
1578 | DBG(("%s: transferring io handle=%d to bo\n", |
||
1579 | __FUNCTION__, bo->handle)); |
||
1580 | /* transfer the handle to a minimum bo */ |
||
1581 | memcpy(base, bo, sizeof(*base)); |
||
1582 | base->io = false; |
||
1583 | list_init(&base->list); |
||
1584 | list_replace(&bo->request, &base->request); |
||
1585 | list_replace(&bo->vma, &base->vma); |
||
1586 | free(bo); |
||
1587 | bo = base; |
||
1588 | } else |
||
1589 | bo->reusable = false; |
||
1590 | |||
1591 | return bo; |
||
1592 | } |
||
1593 | |||
1594 | inline static void kgem_bo_remove_from_inactive(struct kgem *kgem, |
||
1595 | struct kgem_bo *bo) |
||
1596 | { |
||
1597 | DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle)); |
||
1598 | |||
1599 | list_del(&bo->list); |
||
1600 | assert(bo->rq == NULL); |
||
1601 | assert(bo->exec == NULL); |
||
1602 | if (bo->map) { |
||
1603 | assert(!list_is_empty(&bo->vma)); |
||
1604 | list_del(&bo->vma); |
||
1605 | kgem->vma[IS_CPU_MAP(bo->map)].count--; |
||
1606 | } |
||
1607 | } |
||
1608 | |||
1609 | inline static void kgem_bo_remove_from_active(struct kgem *kgem, |
||
1610 | struct kgem_bo *bo) |
||
1611 | { |
||
1612 | DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle)); |
||
1613 | |||
1614 | list_del(&bo->list); |
||
1615 | assert(bo->rq != NULL); |
||
1616 | if (bo->rq == (void *)kgem) |
||
1617 | list_del(&bo->request); |
||
1618 | assert(list_is_empty(&bo->vma)); |
||
1619 | } |
||
1620 | |||
1621 | static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo) |
||
1622 | { |
||
1623 | struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy; |
||
1624 | |||
1625 | DBG(("%s: size=%d, offset=%d, parent used=%d\n", |
||
1626 | __FUNCTION__, bo->size.bytes, bo->delta, io->used)); |
||
1627 | |||
1628 | if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used) |
||
1629 | io->used = bo->delta; |
||
1630 | } |
||
1631 | |||
1632 | static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo) |
||
1633 | { |
||
1634 | assert(bo->refcnt == 0); |
||
1635 | assert(bo->scanout); |
||
1636 | assert(bo->delta); |
||
1637 | assert(!bo->flush); |
||
1638 | assert(!bo->snoop); |
||
1639 | assert(!bo->io); |
||
1640 | |||
1641 | if (bo->purged) { |
||
1642 | DBG(("%s: discarding purged scanout - external name?\n", |
||
1643 | __FUNCTION__)); |
||
1644 | kgem_bo_free(kgem, bo); |
||
1645 | return; |
||
1646 | } |
||
1647 | |||
1648 | DBG(("%s: moving %d [fb %d] to scanout cache, active? %d\n", |
||
1649 | __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL)); |
||
1650 | if (bo->rq) |
||
1651 | list_move_tail(&bo->list, &kgem->scanout); |
||
1652 | else |
||
1653 | list_move(&bo->list, &kgem->scanout); |
||
1654 | } |
||
1655 | |||
1656 | static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo) |
||
1657 | { |
||
1658 | assert(bo->reusable); |
||
1659 | assert(!bo->flush); |
||
1660 | assert(!bo->needs_flush); |
||
1661 | assert(bo->refcnt == 0); |
||
1662 | assert(bo->exec == NULL); |
||
1663 | |||
1664 | if (num_pages(bo) > kgem->max_cpu_size >> 13) { |
||
1665 | DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n", |
||
1666 | __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13)); |
||
1667 | kgem_bo_free(kgem, bo); |
||
1668 | return; |
||
1669 | } |
||
1670 | |||
1671 | assert(bo->tiling == I915_TILING_NONE); |
||
1672 | assert(bo->rq == NULL); |
||
1673 | |||
1674 | DBG(("%s: moving %d to snoop cache\n", __FUNCTION__, bo->handle)); |
||
1675 | list_add(&bo->list, &kgem->snoop); |
||
1676 | } |
||
1677 | |||
1678 | static struct kgem_bo * |
||
1679 | search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags) |
||
1680 | { |
||
1681 | struct kgem_bo *bo, *first = NULL; |
||
1682 | |||
1683 | DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags)); |
||
1684 | |||
1685 | if ((kgem->has_caching | kgem->has_userptr) == 0) |
||
1686 | return NULL; |
||
1687 | |||
1688 | if (list_is_empty(&kgem->snoop)) { |
||
1689 | DBG(("%s: inactive and cache empty\n", __FUNCTION__)); |
||
1690 | if (!__kgem_throttle_retire(kgem, flags)) { |
||
1691 | DBG(("%s: nothing retired\n", __FUNCTION__)); |
||
1692 | return NULL; |
||
1693 | } |
||
1694 | } |
||
1695 | |||
1696 | list_for_each_entry(bo, &kgem->snoop, list) { |
||
1697 | assert(bo->refcnt == 0); |
||
1698 | assert(bo->snoop); |
||
1699 | assert(!bo->scanout); |
||
1700 | assert(!bo->purged); |
||
1701 | assert(bo->proxy == NULL); |
||
1702 | assert(bo->tiling == I915_TILING_NONE); |
||
1703 | assert(bo->rq == NULL); |
||
1704 | assert(bo->exec == NULL); |
||
1705 | |||
1706 | if (num_pages > num_pages(bo)) |
||
1707 | continue; |
||
1708 | |||
1709 | if (num_pages(bo) > 2*num_pages) { |
||
1710 | if (first == NULL) |
||
1711 | first = bo; |
||
1712 | continue; |
||
1713 | } |
||
1714 | |||
1715 | list_del(&bo->list); |
||
1716 | bo->pitch = 0; |
||
1717 | bo->delta = 0; |
||
1718 | |||
1719 | DBG((" %s: found handle=%d (num_pages=%d) in snoop cache\n", |
||
1720 | __FUNCTION__, bo->handle, num_pages(bo))); |
||
1721 | return bo; |
||
1722 | } |
||
1723 | |||
1724 | if (first) { |
||
1725 | list_del(&first->list); |
||
1726 | first->pitch = 0; |
||
1727 | first->delta = 0; |
||
1728 | |||
1729 | DBG((" %s: found handle=%d (num_pages=%d) in snoop cache\n", |
||
1730 | __FUNCTION__, first->handle, num_pages(first))); |
||
1731 | return first; |
||
1732 | } |
||
1733 | |||
1734 | return NULL; |
||
1735 | } |
||
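/* Illustrative model of the search policy above: prefer any bo within
 * 2x of the request, but remember the first oversized candidate as a
 * fallback so the list is only walked once.  Sketch, with take() and
 * want standing in for the inline list_del+reset and num_pages:
 *
 *	struct kgem_bo *best = NULL;
 *	list_for_each_entry(bo, &kgem->snoop, list) {
 *		if (num_pages(bo) < want)
 *			continue;		// too small
 *		if (num_pages(bo) <= 2*want)
 *			return take(bo);	// close fit, stop early
 *		if (best == NULL)
 *			best = bo;		// oversized fallback
 *	}
 *	return best ? take(best) : NULL;
 */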
1736 | |||
1737 | void kgem_bo_undo(struct kgem *kgem, struct kgem_bo *bo) |
||
1738 | { |
||
1739 | if (kgem->nexec != 1 || bo->exec == NULL) |
||
1740 | return; |
||
1741 | |||
1742 | DBG(("%s: only handle in batch, discarding last operations for handle=%d\n", |
||
1743 | __FUNCTION__, bo->handle)); |
||
1744 | |||
1745 | assert(bo->exec == &kgem->exec[0]); |
||
1746 | assert(kgem->exec[0].handle == bo->handle); |
||
1747 | assert(RQ(bo->rq) == kgem->next_request); |
||
1748 | |||
1749 | bo->refcnt++; |
||
1750 | kgem_reset(kgem); |
||
1751 | bo->refcnt--; |
||
1752 | } |
||
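/* Sketch of the intended use (hypothetical caller): if a pending
 * operation is the only one in the batch and is about to be wholly
 * overwritten, it can be cancelled outright:
 *
 *	if (op_is_redundant(bo))
 *		kgem_bo_undo(kgem, bo);	// drop the queued batch
 *
 * The refcnt++/kgem_reset()/refcnt-- above is a temporary pin:
 * kgem_reset() decouples, and may free, every unreferenced bo attached
 * to the open request, so the bo is kept alive across the reset.
 */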
1753 | |||
1754 | static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) |
||
1755 | { |
||
1756 | DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle)); |
||
1757 | |||
1758 | assert(list_is_empty(&bo->list)); |
||
1759 | assert(bo->refcnt == 0); |
||
1760 | assert(!bo->purged || !bo->reusable); |
||
1761 | assert(bo->proxy == NULL); |
||
1762 | assert_tiling(kgem, bo); |
||
1763 | |||
1764 | bo->binding.offset = 0; |
||
1765 | |||
1766 | if (DBG_NO_CACHE) |
||
1767 | goto destroy; |
||
1768 | |||
1769 | if (bo->snoop && !bo->flush) { |
||
1770 | DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle)); |
||
1771 | assert(bo->reusable); |
||
1772 | assert(list_is_empty(&bo->list)); |
||
1773 | if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle)) |
||
1774 | __kgem_bo_clear_busy(bo); |
||
1775 | if (bo->rq == NULL) |
||
1776 | kgem_bo_move_to_snoop(kgem, bo); |
||
1777 | return; |
||
1778 | } |
||
1779 | if (!IS_USER_MAP(bo->map)) |
||
1780 | bo->flush = false; |
||
1781 | |||
1782 | if (bo->scanout) { |
||
1783 | kgem_bo_move_to_scanout(kgem, bo); |
||
1784 | return; |
||
1785 | } |
||
1786 | |||
1787 | if (bo->io) |
||
1788 | bo = kgem_bo_replace_io(bo); |
||
1789 | if (!bo->reusable) { |
||
1790 | DBG(("%s: handle=%d, not reusable\n", |
||
1791 | __FUNCTION__, bo->handle)); |
||
1792 | goto destroy; |
||
1793 | } |
||
1794 | |||
1795 | if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU) |
||
1796 | kgem_bo_release_map(kgem, bo); |
||
1797 | |||
1798 | assert(list_is_empty(&bo->vma)); |
||
1799 | assert(list_is_empty(&bo->list)); |
||
1800 | assert(bo->flush == false); |
||
1801 | assert(bo->snoop == false); |
||
1802 | assert(bo->io == false); |
||
1803 | assert(bo->scanout == false); |
||
1804 | |||
1805 | kgem_bo_undo(kgem, bo); |
||
1806 | assert(bo->refcnt == 0); |
||
1807 | |||
1808 | if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle)) |
||
1809 | __kgem_bo_clear_busy(bo); |
||
1810 | |||
1811 | if (bo->rq) { |
||
1812 | struct list *cache; |
||
1813 | |||
1814 | DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle)); |
||
1815 | if (bucket(bo) < NUM_CACHE_BUCKETS) |
||
1816 | cache = &kgem->active[bucket(bo)][bo->tiling]; |
||
1817 | else |
||
1818 | cache = &kgem->large; |
||
1819 | list_add(&bo->list, cache); |
||
1820 | return; |
||
1821 | } |
||
1822 | |||
1823 | assert(bo->exec == NULL); |
||
1824 | assert(list_is_empty(&bo->request)); |
||
1825 | |||
1826 | if (!IS_CPU_MAP(bo->map)) { |
||
1827 | if (!kgem_bo_set_purgeable(kgem, bo)) |
||
1828 | goto destroy; |
||
1829 | |||
1830 | if (!kgem->has_llc && bo->domain == DOMAIN_CPU) |
||
1831 | goto destroy; |
||
1832 | |||
1833 | DBG(("%s: handle=%d, purged\n", |
||
1834 | __FUNCTION__, bo->handle)); |
||
1835 | } |
||
1836 | |||
1837 | kgem_bo_move_to_inactive(kgem, bo); |
||
1838 | return; |
||
1839 | |||
1840 | destroy: |
||
1841 | if (!bo->exec) |
||
1842 | kgem_bo_free(kgem, bo); |
||
1843 | } |
||
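/* Condensed view (illustrative only) of the routing just performed --
 * the function is a cache dispatcher for a bo whose last reference
 * has been dropped:
 *
 *	bo->snoop	-> snoop cache (CPU-coherent reuse)
 *	bo->scanout	-> scanout cache (kept around for page-flips)
 *	!bo->reusable	-> freed immediately
 *	bo->rq		-> active cache, bucketed by size and tiling
 *	otherwise	-> marked purgeable and parked on the inactive
 *			   cache (or freed if that fails)
 */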
1844 | |||
1845 | static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo) |
||
1846 | { |
||
1847 | assert(bo->refcnt); |
||
1848 | if (--bo->refcnt == 0) |
||
1849 | __kgem_bo_destroy(kgem, bo); |
||
1850 | } |
||
1851 | |||
1852 | static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo) |
||
1853 | { |
||
1854 | while (!list_is_empty(&bo->base.vma)) { |
||
1855 | struct kgem_bo *cached; |
||
1856 | |||
1857 | cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma); |
||
1858 | assert(cached->proxy == &bo->base); |
||
1859 | list_del(&cached->vma); |
||
1860 | |||
1861 | assert(*(struct kgem_bo **)cached->map == cached); |
||
1862 | *(struct kgem_bo **)cached->map = NULL; |
||
1863 | cached->map = NULL; |
||
1864 | |||
1865 | kgem_bo_destroy(kgem, cached); |
||
1866 | } |
||
1867 | } |
||
1868 | |||
1869 | static bool kgem_retire__buffers(struct kgem *kgem) |
||
1870 | { |
||
1871 | bool retired = false; |
||
1872 | |||
1873 | while (!list_is_empty(&kgem->active_buffers)) { |
||
1874 | struct kgem_buffer *bo = |
||
1875 | list_last_entry(&kgem->active_buffers, |
||
1876 | struct kgem_buffer, |
||
1877 | base.list); |
||
1878 | |||
1879 | if (bo->base.rq) |
||
1880 | break; |
||
1881 | |||
1882 | DBG(("%s: releasing upload cache for handle=%d? %d\n", |
||
1883 | __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma))); |
||
1884 | list_del(&bo->base.list); |
||
1885 | kgem_buffer_release(kgem, bo); |
||
1886 | kgem_bo_unref(kgem, &bo->base); |
||
1887 | retired = true; |
||
1888 | } |
||
1889 | |||
1890 | return retired; |
||
1891 | } |
||
1892 | |||
1893 | static bool kgem_retire__flushing(struct kgem *kgem) |
||
1894 | { |
||
1895 | struct kgem_bo *bo, *next; |
||
1896 | bool retired = false; |
||
1897 | |||
1898 | list_for_each_entry_safe(bo, next, &kgem->flushing, request) { |
||
1899 | assert(bo->rq == (void *)kgem); |
||
1900 | assert(bo->exec == NULL); |
||
1901 | |||
1902 | if (__kgem_busy(kgem, bo->handle)) |
||
1903 | break; |
||
1904 | |||
1905 | __kgem_bo_clear_busy(bo); |
||
1906 | |||
1907 | if (bo->refcnt) |
||
1908 | continue; |
||
1909 | |||
1910 | if (bo->snoop) { |
||
1911 | kgem_bo_move_to_snoop(kgem, bo); |
||
1912 | } else if (bo->scanout) { |
||
1913 | kgem_bo_move_to_scanout(kgem, bo); |
||
1914 | } else if ((bo = kgem_bo_replace_io(bo))->reusable && |
||
1915 | kgem_bo_set_purgeable(kgem, bo)) { |
||
1916 | kgem_bo_move_to_inactive(kgem, bo); |
||
1917 | retired = true; |
||
1918 | } else |
||
1919 | kgem_bo_free(kgem, bo); |
||
1920 | } |
||
1921 | #if HAS_DEBUG_FULL |
||
1922 | { |
||
1923 | int count = 0; |
||
1924 | list_for_each_entry(bo, &kgem->flushing, request) |
||
1925 | count++; |
||
1926 | ErrorF("%s: %d bo on flushing list\n", __FUNCTION__, count); |
||
1927 | } |
||
1928 | #endif |
||
1929 | |||
1930 | kgem->need_retire |= !list_is_empty(&kgem->flushing); |
||
1931 | |||
1932 | return retired; |
||
1933 | } |
||
1934 | |||
1935 | |||
1936 | static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq) |
||
1937 | { |
||
1938 | bool retired = false; |
||
1939 | |||
1940 | DBG(("%s: request %d complete\n", |
||
1941 | __FUNCTION__, rq->bo->handle)); |
||
1942 | |||
1943 | while (!list_is_empty(&rq->buffers)) { |
||
1944 | struct kgem_bo *bo; |
||
1945 | |||
1946 | bo = list_first_entry(&rq->buffers, |
||
1947 | struct kgem_bo, |
||
1948 | request); |
||
1949 | |||
1950 | assert(RQ(bo->rq) == rq); |
||
1951 | assert(bo->exec == NULL); |
||
1952 | assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE); |
||
1953 | |||
1954 | list_del(&bo->request); |
||
1955 | |||
1956 | if (bo->needs_flush) |
||
1957 | bo->needs_flush = __kgem_busy(kgem, bo->handle); |
||
1958 | if (bo->needs_flush) { |
||
1959 | DBG(("%s: moving %d to flushing\n", |
||
1960 | __FUNCTION__, bo->handle)); |
||
1961 | list_add(&bo->request, &kgem->flushing); |
||
1962 | bo->rq = (void *)kgem; |
||
1963 | continue; |
||
1964 | } |
||
1965 | |||
1966 | bo->domain = DOMAIN_NONE; |
||
1967 | bo->rq = NULL; |
||
1968 | if (bo->refcnt) |
||
1969 | continue; |
||
1970 | |||
1971 | if (bo->snoop) { |
||
1972 | kgem_bo_move_to_snoop(kgem, bo); |
||
1973 | } else if (bo->scanout) { |
||
1974 | kgem_bo_move_to_scanout(kgem, bo); |
||
1975 | } else if ((bo = kgem_bo_replace_io(bo))->reusable && |
||
1976 | kgem_bo_set_purgeable(kgem, bo)) { |
||
1977 | kgem_bo_move_to_inactive(kgem, bo); |
||
1978 | retired = true; |
||
1979 | } else { |
||
1980 | DBG(("%s: closing %d\n", |
||
1981 | __FUNCTION__, bo->handle)); |
||
1982 | kgem_bo_free(kgem, bo); |
||
1983 | } |
||
1984 | } |
||
1985 | |||
1986 | assert(rq->bo->rq == NULL); |
||
1987 | assert(list_is_empty(&rq->bo->request)); |
||
1988 | |||
1989 | if (--rq->bo->refcnt == 0) { |
||
1990 | if (kgem_bo_set_purgeable(kgem, rq->bo)) { |
||
1991 | kgem_bo_move_to_inactive(kgem, rq->bo); |
||
1992 | retired = true; |
||
1993 | } else { |
||
1994 | DBG(("%s: closing %d\n", |
||
1995 | __FUNCTION__, rq->bo->handle)); |
||
1996 | kgem_bo_free(kgem, rq->bo); |
||
1997 | } |
||
1998 | } |
||
1999 | |||
2000 | __kgem_request_free(rq); |
||
2001 | return retired; |
||
2002 | } |
||
2003 | |||
2004 | static bool kgem_retire__requests_ring(struct kgem *kgem, int ring) |
||
2005 | { |
||
2006 | bool retired = false; |
||
2007 | |||
2008 | while (!list_is_empty(&kgem->requests[ring])) { |
||
2009 | struct kgem_request *rq; |
||
2010 | |||
2011 | rq = list_first_entry(&kgem->requests[ring], |
||
2012 | struct kgem_request, |
||
2013 | list); |
||
2014 | if (__kgem_busy(kgem, rq->bo->handle)) |
||
2015 | break; |
||
2016 | |||
2017 | retired |= __kgem_retire_rq(kgem, rq); |
||
2018 | } |
||
2019 | |||
2020 | #if HAS_DEBUG_FULL |
||
2021 | { |
||
2022 | struct kgem_bo *bo; |
||
2023 | int count = 0; |
||
2024 | |||
2025 | list_for_each_entry(bo, &kgem->requests[ring], request) |
||
2026 | count++; |
||
2027 | |||
2028 | bo = NULL; |
||
2029 | if (!list_is_empty(&kgem->requests[ring])) |
||
2030 | bo = list_first_entry(&kgem->requests[ring], |
||
2031 | struct kgem_request, |
||
2032 | list)->bo; |
||
2033 | |||
2034 | ErrorF("%s: ring=%d, %d outstanding requests, oldest=%d\n", |
||
2035 | __FUNCTION__, ring, count, bo ? bo->handle : 0); |
||
2036 | } |
||
2037 | #endif |
||
2038 | |||
2039 | return retired; |
||
2040 | } |
||
2041 | |||
2042 | static bool kgem_retire__requests(struct kgem *kgem) |
||
2043 | { |
||
2044 | bool retired = false; |
||
2045 | int n; |
||
2046 | |||
2047 | for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) { |
||
2048 | retired |= kgem_retire__requests_ring(kgem, n); |
||
2049 | kgem->need_retire |= !list_is_empty(&kgem->requests[n]); |
||
2050 | } |
||
2051 | |||
2052 | return retired; |
||
2053 | } |
||
2054 | |||
2055 | bool kgem_retire(struct kgem *kgem) |
||
2056 | { |
||
2057 | bool retired = false; |
||
2058 | |||
2059 | DBG(("%s\n", __FUNCTION__)); |
||
2060 | |||
2061 | kgem->need_retire = false; |
||
2062 | |||
2063 | retired |= kgem_retire__flushing(kgem); |
||
2064 | retired |= kgem_retire__requests(kgem); |
||
2065 | retired |= kgem_retire__buffers(kgem); |
||
2066 | |||
2067 | DBG(("%s -- retired=%d, need_retire=%d\n", |
||
2068 | __FUNCTION__, retired, kgem->need_retire)); |
||
2069 | |||
2070 | kgem->retire(kgem); |
||
2071 | |||
2072 | return retired; |
||
2073 | } |
||
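/* The three retire passes above run in dependency order: clearing the
 * flushing list can mark request bos idle, retiring requests drops the
 * last GPU references that keep upload buffers busy, and only then can
 * kgem_retire__buffers() recycle them.  A hypothetical caller polls:
 *
 *	while (kgem->need_retire && !kgem_retire(kgem))
 *		usleep(1000);	// back off until something completes
 */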
2074 | |||
2075 | bool __kgem_ring_is_idle(struct kgem *kgem, int ring) |
||
2076 | { |
||
2077 | struct kgem_request *rq; |
||
2078 | |||
2079 | assert(!list_is_empty(&kgem->requests[ring])); |
||
2080 | |||
2081 | rq = list_last_entry(&kgem->requests[ring], |
||
2082 | struct kgem_request, list); |
||
2083 | if (__kgem_busy(kgem, rq->bo->handle)) { |
||
2084 | DBG(("%s: last requests handle=%d still busy\n", |
||
2085 | __FUNCTION__, rq->bo->handle)); |
||
2086 | return false; |
||
2087 | } |
||
2088 | |||
2089 | DBG(("%s: ring=%d idle (handle=%d)\n", |
||
2090 | __FUNCTION__, ring, rq->bo->handle)); |
||
2091 | |||
2092 | kgem_retire__requests_ring(kgem, ring); |
||
2093 | assert(list_is_empty(&kgem->requests[ring])); |
||
2094 | return true; |
||
2095 | } |
||
2096 | |||
2097 | static void kgem_commit(struct kgem *kgem) |
||
2098 | { |
||
2099 | struct kgem_request *rq = kgem->next_request; |
||
2100 | struct kgem_bo *bo, *next; |
||
2101 | |||
2102 | list_for_each_entry_safe(bo, next, &rq->buffers, request) { |
||
2103 | assert(next->request.prev == &bo->request); |
||
2104 | |||
2105 | DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n", |
||
2106 | __FUNCTION__, bo->handle, bo->proxy != NULL, |
||
2107 | bo->gpu_dirty, bo->needs_flush, bo->snoop, |
||
2108 | (unsigned)bo->exec->offset)); |
||
2109 | |||
2110 | assert(bo->exec); |
||
2111 | assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec); |
||
2112 | assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq)); |
||
2113 | |||
2114 | bo->presumed_offset = bo->exec->offset; |
||
2115 | bo->exec = NULL; |
||
2116 | bo->target_handle = -1; |
||
2117 | |||
2118 | if (!bo->refcnt && !bo->reusable) { |
||
2119 | assert(!bo->snoop); |
||
2120 | kgem_bo_free(kgem, bo); |
||
2121 | continue; |
||
2122 | } |
||
2123 | |||
2124 | bo->binding.offset = 0; |
||
2125 | bo->domain = DOMAIN_GPU; |
||
2126 | bo->gpu_dirty = false; |
||
2127 | |||
2128 | if (bo->proxy) { |
||
2129 | /* proxies are not used for domain tracking */ |
||
2130 | bo->exec = NULL; |
||
2131 | __kgem_bo_clear_busy(bo); |
||
2132 | } |
||
2133 | |||
2134 | kgem->scanout_busy |= bo->scanout; |
||
2135 | } |
||
2136 | |||
2137 | if (rq == &kgem->static_request) { |
||
2138 | struct drm_i915_gem_set_domain set_domain; |
||
2139 | |||
2140 | DBG(("%s: syncing due to allocation failure\n", __FUNCTION__)); |
||
2141 | |||
2142 | VG_CLEAR(set_domain); |
||
2143 | set_domain.handle = rq->bo->handle; |
||
2144 | set_domain.read_domains = I915_GEM_DOMAIN_GTT; |
||
2145 | set_domain.write_domain = I915_GEM_DOMAIN_GTT; |
||
2146 | if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) { |
||
2147 | DBG(("%s: sync: GPU hang detected\n", __FUNCTION__)); |
||
2148 | kgem_throttle(kgem); |
||
2149 | } |
||
2150 | |||
2151 | kgem_retire(kgem); |
||
2152 | assert(list_is_empty(&rq->buffers)); |
||
2153 | |||
2154 | assert(rq->bo->map == NULL); |
||
2155 | gem_close(kgem->fd, rq->bo->handle); |
||
2156 | kgem_cleanup_cache(kgem); |
||
2157 | } else { |
||
2158 | list_add_tail(&rq->list, &kgem->requests[rq->ring]); |
||
2159 | kgem->need_throttle = kgem->need_retire = 1; |
||
2160 | } |
||
2161 | |||
2162 | kgem->next_request = NULL; |
||
2163 | } |
||
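/* Caching bo->presumed_offset from the execbuffer result lets future
 * batches emit relocations with a correct guess; when the guess still
 * holds, the kernel can skip rewriting the batch.  Illustrative
 * emission against the cached value (not the actual reloc helper):
 *
 *	reloc->presumed_offset = bo->presumed_offset;
 *	batch[n] = bo->presumed_offset + delta;	// dword placed in batch
 */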
2164 | |||
2165 | static void kgem_close_list(struct kgem *kgem, struct list *head) |
||
2166 | { |
||
2167 | while (!list_is_empty(head)) |
||
2168 | kgem_bo_free(kgem, list_first_entry(head, struct kgem_bo, list)); |
||
2169 | } |
||
2170 | |||
2171 | static void kgem_close_inactive(struct kgem *kgem) |
||
2172 | { |
||
2173 | unsigned int i; |
||
2174 | |||
2175 | for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) |
||
2176 | kgem_close_list(kgem, &kgem->inactive[i]); |
||
2177 | } |
||
2178 | |||
2179 | static void kgem_finish_buffers(struct kgem *kgem) |
||
2180 | { |
||
2181 | struct kgem_buffer *bo, *next; |
||
2182 | |||
2183 | list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) { |
||
2184 | DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%s\n", |
||
2185 | __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL, |
||
2186 | bo->write, bo->mmapped ? IS_CPU_MAP(bo->base.map) ? "cpu" : "gtt" : "no")); |
||
2187 | |||
2188 | assert(next->base.list.prev == &bo->base.list); |
||
2189 | assert(bo->base.io); |
||
2190 | assert(bo->base.refcnt >= 1); |
||
2191 | |||
2192 | if (!bo->base.exec) { |
||
2193 | DBG(("%s: skipping unattached handle=%d, used=%d\n", |
||
2194 | __FUNCTION__, bo->base.handle, bo->used)); |
||
2195 | continue; |
||
2196 | } |
||
2197 | |||
2198 | if (!bo->write) { |
||
2199 | assert(bo->base.exec || bo->base.refcnt > 1); |
||
2200 | goto decouple; |
||
2201 | } |
||
2202 | |||
2203 | if (bo->mmapped) { |
||
2204 | int used; |
||
2205 | |||
2206 | assert(!bo->need_io); |
||
2207 | |||
2208 | used = ALIGN(bo->used, PAGE_SIZE); |
||
2209 | if (!DBG_NO_UPLOAD_ACTIVE && |
||
2210 | used + PAGE_SIZE <= bytes(&bo->base) && |
||
2211 | (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) { |
||
2212 | DBG(("%s: retaining upload buffer (%d/%d)\n", |
||
2213 | __FUNCTION__, bo->used, bytes(&bo->base))); |
||
2214 | bo->used = used; |
||
2215 | list_move(&bo->base.list, |
||
2216 | &kgem->active_buffers); |
||
2217 | continue; |
||
2218 | } |
||
2219 | DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n", |
||
2220 | __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map))); |
||
2221 | goto decouple; |
||
2222 | } |
||
2223 | |||
2224 | if (!bo->used) { |
||
2225 | /* Unless we replace the handle in the execbuffer, |
||
2226 | * then this bo will become active. So decouple it |
||
2227 | * from the buffer list and track it in the normal |
||
2228 | * manner. |
||
2229 | */ |
||
2230 | goto decouple; |
||
2231 | } |
||
2232 | |||
2233 | assert(bo->need_io); |
||
2234 | assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring)); |
||
2235 | assert(bo->base.domain != DOMAIN_GPU); |
||
2236 | |||
2237 | if (bo->base.refcnt == 1 && |
||
2238 | bo->base.size.pages.count > 1 && |
||
2239 | bo->used < bytes(&bo->base) / 2) { |
||
2240 | struct kgem_bo *shrink; |
||
2241 | unsigned alloc = NUM_PAGES(bo->used); |
||
2242 | |||
2243 | shrink = search_snoop_cache(kgem, alloc, |
||
2244 | CREATE_INACTIVE | CREATE_NO_RETIRE); |
||
2245 | if (shrink) { |
||
2246 | void *map; |
||
2247 | int n; |
||
2248 | |||
2249 | DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n", |
||
2250 | __FUNCTION__, |
||
2251 | bo->used, bytes(&bo->base), bytes(shrink), |
||
2252 | bo->base.handle, shrink->handle)); |
||
2253 | |||
2254 | assert(bo->used <= bytes(shrink)); |
||
2255 | map = kgem_bo_map__cpu(kgem, shrink); |
||
2256 | if (map) { |
||
2257 | kgem_bo_sync__cpu(kgem, shrink); |
||
2258 | memcpy(map, bo->mem, bo->used); |
||
2259 | |||
2260 | shrink->target_handle = |
||
2261 | kgem->has_handle_lut ? bo->base.target_handle : shrink->handle; |
||
2262 | for (n = 0; n < kgem->nreloc; n++) { |
||
2263 | if (kgem->reloc[n].target_handle == bo->base.target_handle) { |
||
2264 | kgem->reloc[n].target_handle = shrink->target_handle; |
||
2265 | kgem->reloc[n].presumed_offset = shrink->presumed_offset; |
||
2266 | kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] = |
||
2267 | kgem->reloc[n].delta + shrink->presumed_offset; |
||
2268 | } |
||
2269 | } |
||
2270 | |||
2271 | bo->base.exec->handle = shrink->handle; |
||
2272 | bo->base.exec->offset = shrink->presumed_offset; |
||
2273 | shrink->exec = bo->base.exec; |
||
2274 | shrink->rq = bo->base.rq; |
||
2275 | list_replace(&bo->base.request, |
||
2276 | &shrink->request); |
||
2277 | list_init(&bo->base.request); |
||
2278 | shrink->needs_flush = bo->base.gpu_dirty; |
||
2279 | |||
2280 | bo->base.exec = NULL; |
||
2281 | bo->base.rq = NULL; |
||
2282 | bo->base.gpu_dirty = false; |
||
2283 | bo->base.needs_flush = false; |
||
2284 | bo->used = 0; |
||
2285 | |||
2286 | goto decouple; |
||
2287 | } |
||
2288 | |||
2289 | __kgem_bo_destroy(kgem, shrink); |
||
2290 | } |
||
2291 | |||
2292 | shrink = search_linear_cache(kgem, alloc, |
||
2293 | CREATE_INACTIVE | CREATE_NO_RETIRE); |
||
2294 | if (shrink) { |
||
2295 | int n; |
||
2296 | |||
2297 | DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n", |
||
2298 | __FUNCTION__, |
||
2299 | bo->used, bytes(&bo->base), bytes(shrink), |
||
2300 | bo->base.handle, shrink->handle)); |
||
2301 | |||
2302 | assert(bo->used <= bytes(shrink)); |
||
2303 | if (gem_write(kgem->fd, shrink->handle, |
||
2304 | 0, bo->used, bo->mem) == 0) { |
||
2305 | shrink->target_handle = |
||
2306 | kgem->has_handle_lut ? bo->base.target_handle : shrink->handle; |
||
2307 | for (n = 0; n < kgem->nreloc; n++) { |
||
2308 | if (kgem->reloc[n].target_handle == bo->base.target_handle) { |
||
2309 | kgem->reloc[n].target_handle = shrink->target_handle; |
||
2310 | kgem->reloc[n].presumed_offset = shrink->presumed_offset; |
||
2311 | kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] = |
||
2312 | kgem->reloc[n].delta + shrink->presumed_offset; |
||
2313 | } |
||
2314 | } |
||
2315 | |||
2316 | bo->base.exec->handle = shrink->handle; |
||
2317 | bo->base.exec->offset = shrink->presumed_offset; |
||
2318 | shrink->exec = bo->base.exec; |
||
2319 | shrink->rq = bo->base.rq; |
||
2320 | list_replace(&bo->base.request, |
||
2321 | &shrink->request); |
||
2322 | list_init(&bo->base.request); |
||
2323 | shrink->needs_flush = bo->base.gpu_dirty; |
||
2324 | |||
2325 | bo->base.exec = NULL; |
||
2326 | bo->base.rq = NULL; |
||
2327 | bo->base.gpu_dirty = false; |
||
2328 | bo->base.needs_flush = false; |
||
2329 | bo->used = 0; |
||
2330 | |||
2331 | goto decouple; |
||
2332 | } |
||
2333 | |||
2334 | __kgem_bo_destroy(kgem, shrink); |
||
2335 | } |
||
2336 | } |
||
2337 | |||
2338 | DBG(("%s: handle=%d, uploading %d/%d\n", |
||
2339 | __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base))); |
||
2340 | ASSERT_IDLE(kgem, bo->base.handle); |
||
2341 | assert(bo->used <= bytes(&bo->base)); |
||
2342 | gem_write(kgem->fd, bo->base.handle, |
||
2343 | 0, bo->used, bo->mem); |
||
2344 | bo->need_io = 0; |
||
2345 | |||
2346 | decouple: |
||
2347 | DBG(("%s: releasing handle=%d\n", |
||
2348 | __FUNCTION__, bo->base.handle)); |
||
2349 | list_del(&bo->base.list); |
||
2350 | kgem_bo_unref(kgem, &bo->base); |
||
2351 | } |
||
2352 | } |
||
2353 | |||
2354 | static void kgem_cleanup(struct kgem *kgem) |
||
2355 | { |
||
2356 | int n; |
||
2357 | |||
2358 | for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) { |
||
2359 | while (!list_is_empty(&kgem->requests[n])) { |
||
2360 | struct kgem_request *rq; |
||
2361 | |||
2362 | rq = list_first_entry(&kgem->requests[n], |
||
2363 | struct kgem_request, |
||
2364 | list); |
||
2365 | while (!list_is_empty(&rq->buffers)) { |
||
2366 | struct kgem_bo *bo; |
||
2367 | |||
2368 | bo = list_first_entry(&rq->buffers, |
||
2369 | struct kgem_bo, |
||
2370 | request); |
||
2371 | |||
2372 | bo->exec = NULL; |
||
2373 | bo->gpu_dirty = false; |
||
2374 | __kgem_bo_clear_busy(bo); |
||
2375 | if (bo->refcnt == 0) |
||
2376 | kgem_bo_free(kgem, bo); |
||
2377 | } |
||
2378 | |||
2379 | __kgem_request_free(rq); |
||
2380 | } |
||
2381 | } |
||
2382 | |||
2383 | kgem_close_inactive(kgem); |
||
2384 | } |
||
2385 | |||
2386 | static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size) |
||
2387 | { |
||
2388 | int ret; |
||
2389 | |||
2390 | ASSERT_IDLE(kgem, handle); |
||
2391 | |||
2392 | /* If there is no surface data, just upload the batch */ |
||
2393 | if (kgem->surface == kgem->batch_size) |
||
2394 | return gem_write(kgem->fd, handle, |
||
2395 | 0, sizeof(uint32_t)*kgem->nbatch, |
||
2396 | kgem->batch); |
||
2397 | |||
2398 | /* Are the batch pages conjoint with the surface pages? */ |
||
2399 | if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) { |
||
2400 | assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t))); |
||
2401 | return gem_write(kgem->fd, handle, |
||
2402 | 0, kgem->batch_size*sizeof(uint32_t), |
||
2403 | kgem->batch); |
||
2404 | } |
||
2405 | |||
2406 | /* Disjoint surface/batch, upload separately */ |
||
2407 | ret = gem_write(kgem->fd, handle, |
||
2408 | 0, sizeof(uint32_t)*kgem->nbatch, |
||
2409 | kgem->batch); |
||
2410 | if (ret) |
||
2411 | return ret; |
||
2412 | |||
2413 | ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size); |
||
2414 | ret -= sizeof(uint32_t) * kgem->surface; |
||
2415 | assert(size-ret >= kgem->nbatch*sizeof(uint32_t)); |
||
2416 | return __gem_write(kgem->fd, handle, |
||
2417 | size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t), |
||
2418 | kgem->batch + kgem->surface); |
||
2419 | } |
||
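/* Worked example for the disjoint path above (hypothetical numbers):
 * with batch_size = 4096 dwords, nbatch = 700 and surface = 3800, the
 * batch occupies dwords [0, 700) and the surface state [3800, 4096).
 * Then
 *	ret = PAGE_ALIGN(4096*4) - 3800*4 = 16384 - 15200 = 1184 bytes
 * and the second write of 1184 bytes lands at byte offset size - 1184,
 * packing the surface block against the very end of the target bo.
 */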
2420 | |||
2421 | void kgem_reset(struct kgem *kgem) |
||
2422 | { |
||
2423 | if (kgem->next_request) { |
||
2424 | struct kgem_request *rq = kgem->next_request; |
||
2425 | |||
2426 | while (!list_is_empty(&rq->buffers)) { |
||
2427 | struct kgem_bo *bo = |
||
2428 | list_first_entry(&rq->buffers, |
||
2429 | struct kgem_bo, |
||
2430 | request); |
||
2431 | list_del(&bo->request); |
||
2432 | |||
2433 | assert(RQ(bo->rq) == rq); |
||
2434 | |||
2435 | bo->binding.offset = 0; |
||
2436 | bo->exec = NULL; |
||
2437 | bo->target_handle = -1; |
||
2438 | bo->gpu_dirty = false; |
||
2439 | |||
2440 | if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) { |
||
2441 | assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE); |
||
2442 | list_add(&bo->request, &kgem->flushing); |
||
2443 | bo->rq = (void *)kgem; |
||
2444 | } else |
||
2445 | __kgem_bo_clear_busy(bo); |
||
2446 | |||
2447 | if (bo->refcnt || bo->rq) |
||
2448 | continue; |
||
2449 | |||
2450 | if (bo->snoop) { |
||
2451 | kgem_bo_move_to_snoop(kgem, bo); |
||
2452 | } else if (bo->scanout) { |
||
2453 | kgem_bo_move_to_scanout(kgem, bo); |
||
2454 | } else if ((bo = kgem_bo_replace_io(bo))->reusable && |
||
2455 | kgem_bo_set_purgeable(kgem, bo)) { |
||
2456 | kgem_bo_move_to_inactive(kgem, bo); |
||
2457 | } else { |
||
2458 | DBG(("%s: closing %d\n", |
||
2459 | __FUNCTION__, bo->handle)); |
||
2460 | kgem_bo_free(kgem, bo); |
||
2461 | } |
||
2462 | } |
||
2463 | |||
2464 | if (rq != &kgem->static_request) { |
||
2465 | list_init(&rq->list); |
||
2466 | __kgem_request_free(rq); |
||
2467 | } |
||
2468 | } |
||
2469 | |||
2470 | kgem->nfence = 0; |
||
2471 | kgem->nexec = 0; |
||
2472 | kgem->nreloc = 0; |
||
2473 | kgem->nreloc__self = 0; |
||
2474 | kgem->aperture = 0; |
||
2475 | kgem->aperture_fenced = 0; |
||
2476 | kgem->nbatch = 0; |
||
2477 | kgem->surface = kgem->batch_size; |
||
2478 | kgem->mode = KGEM_NONE; |
||
2479 | kgem->flush = 0; |
||
2480 | kgem->batch_flags = kgem->batch_flags_base; |
||
2481 | |||
2482 | kgem->next_request = __kgem_request_alloc(kgem); |
||
2483 | |||
2484 | kgem_sna_reset(kgem); |
||
2485 | } |
||
2486 | |||
2487 | static int compact_batch_surface(struct kgem *kgem) |
||
2488 | { |
||
2489 | int size, shrink, n; |
||
2490 | |||
2491 | if (!kgem->has_relaxed_delta) |
||
2492 | return kgem->batch_size; |
||
2493 | |||
2494 | /* See if we can pack the contents into one or two pages */ |
||
2495 | n = ALIGN(kgem->batch_size, 1024); |
||
2496 | size = n - kgem->surface + kgem->nbatch; |
||
2497 | size = ALIGN(size, 1024); |
||
2498 | |||
2499 | shrink = n - size; |
||
2500 | if (shrink) { |
||
2501 | DBG(("shrinking from %d to %d\n", kgem->batch_size, size)); |
||
2502 | |||
2503 | shrink *= sizeof(uint32_t); |
||
2504 | for (n = 0; n < kgem->nreloc; n++) { |
||
2505 | if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION && |
||
2506 | kgem->reloc[n].target_handle == ~0U) |
||
2507 | kgem->reloc[n].delta -= shrink; |
||
2508 | |||
2509 | if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch) |
||
2510 | kgem->reloc[n].offset -= shrink; |
||
2511 | } |
||
2512 | } |
||
2513 | |||
2514 | return size * sizeof(uint32_t); |
||
2515 | } |
||
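/* Worked example (hypothetical numbers): batch_size = 8192 dwords,
 * nbatch = 100, surface = 8000.  Packing gives
 *	size   = ALIGN(8192 - 8000 + 100, 1024) = 1024 dwords
 *	shrink = 8192 - 1024 = 7168 dwords = 28672 bytes
 * so relocations stored past the batch proper move down by 28672
 * bytes, self-referencing deltas shrink by the same amount, and only
 * 4 KiB is uploaded instead of 32 KiB.
 */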
2516 | |||
2517 | static struct kgem_bo * |
||
2518 | kgem_create_batch(struct kgem *kgem, int size) |
||
2519 | { |
||
2520 | struct drm_i915_gem_set_domain set_domain; |
||
2521 | struct kgem_bo *bo; |
||
2522 | |||
2523 | if (size <= 4096) { |
||
2524 | bo = list_first_entry(&kgem->pinned_batches[0], |
||
2525 | struct kgem_bo, |
||
2526 | list); |
||
2527 | if (!bo->rq) { |
||
2528 | out_4096: |
||
2529 | list_move_tail(&bo->list, &kgem->pinned_batches[0]); |
||
2530 | return kgem_bo_reference(bo); |
||
2531 | } |
||
2532 | |||
2533 | if (!__kgem_busy(kgem, bo->handle)) { |
||
2534 | assert(RQ(bo->rq)->bo == bo); |
||
2535 | __kgem_retire_rq(kgem, RQ(bo->rq)); |
||
2536 | goto out_4096; |
||
2537 | } |
||
2538 | } |
||
2539 | |||
2540 | if (size <= 16384) { |
||
2541 | bo = list_first_entry(&kgem->pinned_batches[1], |
||
2542 | struct kgem_bo, |
||
2543 | list); |
||
2544 | if (!bo->rq) { |
||
2545 | out_16384: |
||
2546 | list_move_tail(&bo->list, &kgem->pinned_batches[1]); |
||
2547 | return kgem_bo_reference(bo); |
||
2548 | } |
||
2549 | |||
2550 | if (!__kgem_busy(kgem, bo->handle)) { |
||
2551 | assert(RQ(bo->rq)->bo == bo); |
||
2552 | __kgem_retire_rq(kgem, RQ(bo->rq)); |
||
2553 | goto out_16384; |
||
2554 | } |
||
2555 | } |
||
2556 | |||
2557 | if (kgem->gen == 020 && !kgem->has_pinned_batches) { |
||
2558 | assert(size <= 16384); |
||
2559 | |||
2560 | bo = list_first_entry(&kgem->pinned_batches[size > 4096], |
||
2561 | struct kgem_bo, |
||
2562 | list); |
||
2563 | list_move_tail(&bo->list, &kgem->pinned_batches[size > 4096]); |
||
2564 | |||
2565 | DBG(("%s: syncing due to busy batches\n", __FUNCTION__)); |
||
2566 | |||
2567 | VG_CLEAR(set_domain); |
||
2568 | set_domain.handle = bo->handle; |
||
2569 | set_domain.read_domains = I915_GEM_DOMAIN_GTT; |
||
2570 | set_domain.write_domain = I915_GEM_DOMAIN_GTT; |
||
2571 | if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) { |
||
2572 | DBG(("%s: sync: GPU hang detected\n", __FUNCTION__)); |
||
2573 | kgem_throttle(kgem); |
||
2574 | return NULL; |
||
2575 | } |
||
2576 | |||
2577 | kgem_retire(kgem); |
||
2578 | assert(bo->rq == NULL); |
||
2579 | return kgem_bo_reference(bo); |
||
2580 | } |
||
2581 | |||
2582 | return kgem_create_linear(kgem, size, CREATE_NO_THROTTLE); |
||
2583 | } |
||
2584 | |||
2585 | void _kgem_submit(struct kgem *kgem) |
||
2586 | { |
||
2587 | struct kgem_request *rq; |
||
2588 | uint32_t batch_end; |
||
2589 | int size; |
||
2590 | |||
2591 | assert(!DBG_NO_HW); |
||
2592 | assert(!kgem->wedged); |
||
2593 | |||
2594 | assert(kgem->nbatch); |
||
2595 | assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem)); |
||
2596 | assert(kgem->nbatch <= kgem->surface); |
||
2597 | |||
2598 | batch_end = kgem_end_batch(kgem); |
||
2599 | kgem_sna_flush(kgem); |
||
2600 | |||
2601 | DBG(("batch[%d/%d, flags=%x]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n", |
||
2602 | kgem->mode, kgem->ring, kgem->batch_flags, |
||
2603 | batch_end, kgem->nbatch, kgem->surface, kgem->batch_size, |
||
2604 | kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture)); |
||
2605 | |||
2606 | assert(kgem->nbatch <= kgem->batch_size); |
||
2607 | assert(kgem->nbatch <= kgem->surface); |
||
2608 | assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc)); |
||
2609 | assert(kgem->nexec < ARRAY_SIZE(kgem->exec)); |
||
2610 | assert(kgem->nfence <= kgem->fence_max); |
||
2611 | |||
2612 | kgem_finish_buffers(kgem); |
||
2613 | |||
2614 | #if SHOW_BATCH |
||
2615 | __kgem_batch_debug(kgem, batch_end); |
||
2616 | #endif |
||
2617 | |||
2618 | rq = kgem->next_request; |
||
2619 | if (kgem->surface != kgem->batch_size) |
||
2620 | size = compact_batch_surface(kgem); |
||
2621 | else |
||
2622 | size = kgem->nbatch * sizeof(kgem->batch[0]); |
||
2623 | rq->bo = kgem_create_batch(kgem, size); |
||
2624 | if (rq->bo) { |
||
2625 | uint32_t handle = rq->bo->handle; |
||
2626 | int i; |
||
2627 | |||
2628 | assert(!rq->bo->needs_flush); |
||
2629 | |||
2630 | i = kgem->nexec++; |
||
2631 | kgem->exec[i].handle = handle; |
||
2632 | kgem->exec[i].relocation_count = kgem->nreloc; |
||
2633 | kgem->exec[i].relocs_ptr = (uintptr_t)kgem->reloc; |
||
2634 | kgem->exec[i].alignment = 0; |
||
2635 | kgem->exec[i].offset = rq->bo->presumed_offset; |
||
2636 | kgem->exec[i].flags = 0; |
||
2637 | kgem->exec[i].rsvd1 = 0; |
||
2638 | kgem->exec[i].rsvd2 = 0; |
||
2639 | |||
2640 | rq->bo->target_handle = kgem->has_handle_lut ? i : handle; |
||
2641 | rq->bo->exec = &kgem->exec[i]; |
||
2642 | rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */ |
||
2643 | list_add(&rq->bo->request, &rq->buffers); |
||
2644 | rq->ring = kgem->ring == KGEM_BLT; |
||
2645 | |||
2646 | kgem_fixup_self_relocs(kgem, rq->bo); |
||
2647 | |||
2648 | if (kgem_batch_write(kgem, handle, size) == 0) { |
||
2649 | struct drm_i915_gem_execbuffer2 execbuf; |
||
2650 | int ret, retry = 3; |
||
2651 | |||
2652 | memset(&execbuf, 0, sizeof(execbuf)); |
||
2653 | execbuf.buffers_ptr = (uintptr_t)kgem->exec; |
||
2654 | execbuf.buffer_count = kgem->nexec; |
||
2655 | execbuf.batch_len = batch_end*sizeof(uint32_t); |
||
2656 | execbuf.flags = kgem->ring | kgem->batch_flags; |
||
2657 | |||
2658 | if (DEBUG_DUMP) |
||
2659 | { |
||
2660 | int fd = open("/tmp1/1/batchbuffer.bin", O_CREAT|O_WRONLY|O_BINARY, 0666); |
||
2661 | if (fd != -1) { |
||
2662 | write(fd, kgem->batch, size); |
||
2663 | close(fd); |
||
2664 | } |
||
2665 | else printf("SNA: failed to write batchbuffer\n"); |
||
2666 | asm volatile("int3"); |
||
2667 | } |
||
2668 | |||
2669 | ret = drmIoctl(kgem->fd, |
||
2670 | DRM_IOCTL_I915_GEM_EXECBUFFER2, |
||
2671 | &execbuf); |
||
2672 | while (ret == -1 && errno == EBUSY && retry--) { |
||
2673 | __kgem_throttle(kgem); |
||
2674 | ret = drmIoctl(kgem->fd, |
||
2675 | DRM_IOCTL_I915_GEM_EXECBUFFER2, |
||
2676 | &execbuf); |
||
2677 | } |
||
2678 | if (DEBUG_SYNC && ret == 0) { |
||
2679 | struct drm_i915_gem_set_domain set_domain; |
||
2680 | |||
2681 | VG_CLEAR(set_domain); |
||
2682 | set_domain.handle = handle; |
||
2683 | set_domain.read_domains = I915_GEM_DOMAIN_GTT; |
||
2684 | set_domain.write_domain = I915_GEM_DOMAIN_GTT; |
||
2685 | |||
2686 | ret = drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); |
||
2687 | } |
||
2688 | if (ret == -1) { |
||
2689 | DBG(("%s: GPU hang detected [%d]\n", |
||
2690 | __FUNCTION__, errno)); |
||
2691 | kgem_throttle(kgem); |
||
2692 | kgem->wedged = true; |
||
2693 | |||
2694 | #if 0 |
||
2695 | ret = errno; |
||
2696 | ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n", |
||
2697 | kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, |
||
2698 | kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno); |
||
2699 | |||
2700 | for (i = 0; i < kgem->nexec; i++) { |
||
2701 | struct kgem_bo *bo, *found = NULL; |
||
2702 | |||
2703 | list_for_each_entry(bo, &kgem->next_request->buffers, request) { |
||
2704 | if (bo->handle == kgem->exec[i].handle) { |
||
2705 | found = bo; |
||
2706 | break; |
||
2707 | } |
||
2708 | } |
||
2709 | ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n", |
||
2710 | i, |
||
2711 | kgem->exec[i].handle, |
||
2712 | (int)kgem->exec[i].offset, |
||
2713 | found ? kgem_bo_size(found) : -1, |
||
2714 | found ? found->tiling : -1, |
||
2715 | (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE), |
||
2716 | found ? found->snoop : -1, |
||
2717 | found ? found->purged : -1); |
||
2718 | } |
||
2719 | for (i = 0; i < kgem->nreloc; i++) { |
||
2720 | ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n", |
||
2721 | i, |
||
2722 | (int)kgem->reloc[i].offset, |
||
2723 | kgem->reloc[i].target_handle, |
||
2724 | kgem->reloc[i].delta, |
||
2725 | kgem->reloc[i].read_domains, |
||
2726 | kgem->reloc[i].write_domain, |
||
2727 | (int)kgem->reloc[i].presumed_offset); |
||
2728 | } |
||
2729 | |||
2730 | if (DEBUG_SYNC) { |
||
2731 | int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666); |
||
2732 | if (fd != -1) { |
||
2733 | write(fd, kgem->batch, batch_end*sizeof(uint32_t)); |
||
2734 | close(fd); |
||
2735 | } |
||
2736 | |||
2737 | FatalError("SNA: failed to submit batchbuffer, errno=%d\n", ret); |
||
2738 | } |
||
2739 | #endif |
||
2740 | } |
||
2741 | } |
||
2742 | |||
2743 | kgem_commit(kgem); |
||
2744 | } |
||
2745 | if (kgem->wedged) |
||
2746 | kgem_cleanup(kgem); |
||
2747 | |||
2748 | kgem_reset(kgem); |
||
2749 | |||
2750 | assert(kgem->next_request != NULL); |
||
2751 | } |
||
2752 | |||
2753 | void kgem_throttle(struct kgem *kgem) |
||
2754 | { |
||
2755 | kgem->need_throttle = 0; |
||
2756 | if (kgem->wedged) |
||
2757 | return; |
||
2758 | |||
2759 | kgem->wedged = __kgem_throttle(kgem); |
||
2760 | if (kgem->wedged) { |
||
2761 | printf("Detected a hung GPU, disabling acceleration.\n"); |
||
2762 | printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n"); |
||
2763 | } |
||
2764 | } |
||
2765 | |||
2766 | void kgem_purge_cache(struct kgem *kgem) |
||
2767 | { |
||
2768 | struct kgem_bo *bo, *next; |
||
2769 | int i; |
||
2770 | |||
2771 | for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) { |
||
2772 | list_for_each_entry_safe(bo, next, &kgem->inactive[i], list) { |
||
2773 | if (!kgem_bo_is_retained(kgem, bo)) { |
||
2774 | DBG(("%s: purging %d\n", |
||
2775 | __FUNCTION__, bo->handle)); |
||
2776 | kgem_bo_free(kgem, bo); |
||
2777 | } |
||
2778 | } |
||
2779 | } |
||
2780 | |||
2781 | kgem->need_purge = false; |
||
2782 | } |
||
2783 | |||
2784 | |||
2785 | void kgem_clean_large_cache(struct kgem *kgem) |
||
2786 | { |
||
2787 | while (!list_is_empty(&kgem->large_inactive)) { |
||
2788 | kgem_bo_free(kgem, |
||
2789 | list_first_entry(&kgem->large_inactive, |
||
2790 | struct kgem_bo, list)); |
||
2791 | |||
2792 | } |
||
2793 | } |
||
2794 | |||
2795 | bool kgem_expire_cache(struct kgem *kgem) |
||
2796 | { |
||
2797 | time_t now, expire; |
||
2798 | struct kgem_bo *bo; |
||
2799 | unsigned int size = 0, count = 0; |
||
2800 | bool idle; |
||
2801 | unsigned int i; |
||
2802 | |||
2803 | time(&now); |
||
2804 | |||
2805 | while (__kgem_freed_bo) { |
||
2806 | bo = __kgem_freed_bo; |
||
2807 | __kgem_freed_bo = *(struct kgem_bo **)bo; |
||
2808 | free(bo); |
||
2809 | } |
||
2810 | |||
2811 | while (__kgem_freed_request) { |
||
2812 | struct kgem_request *rq = __kgem_freed_request; |
||
2813 | __kgem_freed_request = *(struct kgem_request **)rq; |
||
2814 | free(rq); |
||
2815 | } |
||
2816 | |||
2817 | kgem_clean_large_cache(kgem); |
||
2818 | |||
2819 | expire = 0; |
||
2820 | list_for_each_entry(bo, &kgem->snoop, list) { |
||
2821 | if (bo->delta) { |
||
2822 | expire = now - MAX_INACTIVE_TIME/2; |
||
2823 | break; |
||
2824 | } |
||
2825 | |||
2826 | bo->delta = now; |
||
2827 | } |
||
2828 | if (expire) { |
||
2829 | while (!list_is_empty(&kgem->snoop)) { |
||
2830 | bo = list_last_entry(&kgem->snoop, struct kgem_bo, list); |
||
2831 | |||
2832 | if (bo->delta > expire) |
||
2833 | break; |
||
2834 | |||
2835 | kgem_bo_free(kgem, bo); |
||
2836 | } |
||
2837 | } |
||
2838 | #ifdef DEBUG_MEMORY |
||
2839 | { |
||
2840 | long snoop_size = 0; |
||
2841 | int snoop_count = 0; |
||
2842 | list_for_each_entry(bo, &kgem->snoop, list) |
||
2843 | snoop_count++, snoop_size += bytes(bo); |
||
2844 | ErrorF("%s: still allocated %d bo, %ld bytes, in snoop cache\n", |
||
2845 | __FUNCTION__, snoop_count, snoop_size); |
||
2846 | } |
||
2847 | #endif |
||
2848 | |||
2849 | kgem_retire(kgem); |
||
2850 | if (kgem->wedged) |
||
2851 | kgem_cleanup(kgem); |
||
2852 | |||
2853 | kgem->expire(kgem); |
||
2854 | |||
2855 | if (kgem->need_purge) |
||
2856 | kgem_purge_cache(kgem); |
||
2857 | |||
2858 | expire = 0; |
||
2859 | |||
2860 | idle = !kgem->need_retire; |
||
2861 | for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) { |
||
2862 | idle &= list_is_empty(&kgem->inactive[i]); |
||
2863 | list_for_each_entry(bo, &kgem->inactive[i], list) { |
||
2864 | if (bo->delta) { |
||
2865 | expire = now - MAX_INACTIVE_TIME; |
||
2866 | break; |
||
2867 | } |
||
2868 | |||
2869 | bo->delta = now; |
||
2870 | } |
||
2871 | } |
||
2872 | if (idle) { |
||
2873 | DBG(("%s: idle\n", __FUNCTION__)); |
||
2874 | kgem->need_expire = false; |
||
2875 | return false; |
||
2876 | } |
||
2877 | if (expire == 0) |
||
2878 | return true; |
||
2879 | |||
2880 | idle = !kgem->need_retire; |
||
2881 | for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) { |
||
2882 | struct list preserve; |
||
2883 | |||
2884 | list_init(&preserve); |
||
2885 | while (!list_is_empty(&kgem->inactive[i])) { |
||
2886 | bo = list_last_entry(&kgem->inactive[i], |
||
2887 | struct kgem_bo, list); |
||
2888 | |||
2889 | if (bo->delta > expire) { |
||
2890 | idle = false; |
||
2891 | break; |
||
2892 | } |
||
2893 | |||
2894 | if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) { |
||
2895 | idle = false; |
||
2896 | list_move_tail(&bo->list, &preserve); |
||
2897 | } else { |
||
2898 | count++; |
||
2899 | size += bytes(bo); |
||
2900 | DBG(("%s: expiring %d\n", |
||
2901 | __FUNCTION__, bo->handle)); |
||
2902 | kgem_bo_free(kgem, bo); |
||
2903 | } |
||
2904 | } |
||
2905 | if (!list_is_empty(&preserve)) { |
||
2906 | preserve.prev->next = kgem->inactive[i].next; |
||
2907 | kgem->inactive[i].next->prev = preserve.prev; |
||
2908 | kgem->inactive[i].next = preserve.next; |
||
2909 | preserve.next->prev = &kgem->inactive[i]; |
||
2910 | } |
||
2911 | } |
||
2912 | |||
2913 | #ifdef DEBUG_MEMORY |
||
2914 | { |
||
2915 | long inactive_size = 0; |
||
2916 | int inactive_count = 0; |
||
2917 | for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) |
||
2918 | list_for_each_entry(bo, &kgem->inactive[i], list) |
||
2919 | inactive_count++, inactive_size += bytes(bo); |
||
2920 | ErrorF("%s: still allocated %d bo, %ld bytes, in inactive cache\n", |
||
2921 | __FUNCTION__, inactive_count, inactive_size); |
||
2922 | } |
||
2923 | #endif |
||
2924 | |||
2925 | DBG(("%s: expired %d objects, %d bytes, idle? %d\n", |
||
2926 | __FUNCTION__, count, size, idle)); |
||
2927 | |||
2928 | kgem->need_expire = !idle; |
||
2929 | return !idle; |
||
2930 | (void)count; |
||
2931 | (void)size; |
||
2932 | } |
||
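/* The expiry scheme above is two-pass: the first walk stamps
 * bo->delta with the current time for newly inactive buffers and only
 * arms `expire` once it meets an already-stamped bo; the second walk
 * then frees everything older than MAX_INACTIVE_TIME, except that
 * mapped bos get MAP_PRESERVE_TIME of extra grace (the four pointer
 * writes splice the preserved tail back onto the bucket).  Net
 * effect, roughly:
 *
 *	age  = now - bo->delta;
 *	keep = age < MAX_INACTIVE_TIME ||
 *	       (bo->map && age < MAX_INACTIVE_TIME + MAP_PRESERVE_TIME);
 */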
2933 | |||
2934 | void kgem_cleanup_cache(struct kgem *kgem) |
||
2935 | { |
||
2936 | unsigned int i; |
||
2937 | int n; |
||
2938 | |||
2939 | /* sync to the most recent request */ |
||
2940 | for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) { |
||
2941 | if (!list_is_empty(&kgem->requests[n])) { |
||
2942 | struct kgem_request *rq; |
||
2943 | struct drm_i915_gem_set_domain set_domain; |
||
2944 | |||
2945 | rq = list_first_entry(&kgem->requests[n], |
||
2946 | struct kgem_request, |
||
2947 | list); |
||
2948 | |||
2949 | DBG(("%s: sync on cleanup\n", __FUNCTION__)); |
||
2950 | |||
2951 | VG_CLEAR(set_domain); |
||
2952 | set_domain.handle = rq->bo->handle; |
||
2953 | set_domain.read_domains = I915_GEM_DOMAIN_GTT; |
||
2954 | set_domain.write_domain = I915_GEM_DOMAIN_GTT; |
||
2955 | (void)drmIoctl(kgem->fd, |
||
2956 | DRM_IOCTL_I915_GEM_SET_DOMAIN, |
||
2957 | &set_domain); |
||
2958 | } |
||
2959 | } |
||
2960 | |||
2961 | kgem_retire(kgem); |
||
2962 | kgem_cleanup(kgem); |
||
2963 | |||
2964 | for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) { |
||
2965 | while (!list_is_empty(&kgem->inactive[i])) |
||
2966 | kgem_bo_free(kgem, |
||
2967 | list_last_entry(&kgem->inactive[i], |
||
2968 | struct kgem_bo, list)); |
||
2969 | } |
||
2970 | |||
2971 | kgem_clean_large_cache(kgem); |
||
2972 | |||
2973 | while (!list_is_empty(&kgem->snoop)) |
||
2974 | kgem_bo_free(kgem, |
||
2975 | list_last_entry(&kgem->snoop, |
||
2976 | struct kgem_bo, list)); |
||
2977 | |||
2978 | while (__kgem_freed_bo) { |
||
2979 | struct kgem_bo *bo = __kgem_freed_bo; |
||
2980 | __kgem_freed_bo = *(struct kgem_bo **)bo; |
||
2981 | free(bo); |
||
2982 | } |
||
2983 | |||
2984 | kgem->need_purge = false; |
||
2985 | kgem->need_expire = false; |
||
2986 | } |
||
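/* SET_DOMAIN doubles as a synchronisation point here (and in
 * kgem_commit above): moving a request's batch bo into the GTT domain
 * cannot return until the GPU has finished with it.  Minimal sketch
 * of the idiom:
 *
 *	static void wait_for_bo(int fd, uint32_t handle)
 *	{
 *		struct drm_i915_gem_set_domain sd;
 *
 *		memset(&sd, 0, sizeof(sd));	// or VG_CLEAR()
 *		sd.handle = handle;
 *		sd.read_domains = I915_GEM_DOMAIN_GTT;
 *		sd.write_domain = I915_GEM_DOMAIN_GTT;
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *	}
 */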
2987 | |||
2988 | static struct kgem_bo * |
||
2989 | search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags) |
||
2990 | { |
||
2991 | struct kgem_bo *bo, *first = NULL; |
||
2992 | bool use_active = (flags & CREATE_INACTIVE) == 0; |
||
2993 | struct list *cache; |
||
2994 | |||
2995 | DBG(("%s: num_pages=%d, flags=%x, use_active? %d, use_large=%d [max=%d]\n", |
||
2996 | __FUNCTION__, num_pages, flags, use_active, |
||
2997 | num_pages >= MAX_CACHE_SIZE / PAGE_SIZE, |
||
2998 | MAX_CACHE_SIZE / PAGE_SIZE)); |
||
2999 | |||
3000 | assert(num_pages); |
||
3001 | |||
3002 | if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE) { |
||
3003 | DBG(("%s: searching large buffers\n", __FUNCTION__)); |
||
3004 | retry_large: |
||
3005 | cache = use_active ? &kgem->large : &kgem->large_inactive; |
||
3006 | list_for_each_entry_safe(bo, first, cache, list) { |
||
3007 | assert(bo->refcnt == 0); |
||
3008 | assert(bo->reusable); |
||
3009 | assert(!bo->scanout); |
||
3010 | |||
3011 | if (num_pages > num_pages(bo)) |
||
3012 | goto discard; |
||
3013 | |||
3014 | if (bo->tiling != I915_TILING_NONE) { |
||
3015 | if (use_active) |
||
3016 | goto discard; |
||
3017 | |||
3018 | if (!gem_set_tiling(kgem->fd, bo->handle, |
||
3019 | I915_TILING_NONE, 0)) |
||
3020 | goto discard; |
||
3021 | |||
3022 | bo->tiling = I915_TILING_NONE; |
||
3023 | bo->pitch = 0; |
||
3024 | } |
||
3025 | |||
3026 | if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) |
||
3027 | goto discard; |
||
3028 | |||
3029 | list_del(&bo->list); |
||
3030 | if (bo->rq == (void *)kgem) |
||
3031 | list_del(&bo->request); |
||
3032 | |||
3033 | bo->delta = 0; |
||
3034 | assert_tiling(kgem, bo); |
||
3035 | return bo; |
||
3036 | |||
3037 | discard: |
||
3038 | if (!use_active) |
||
3039 | kgem_bo_free(kgem, bo); |
||
3040 | } |
||
3041 | |||
3042 | if (use_active) { |
||
3043 | use_active = false; |
||
3044 | goto retry_large; |
||
3045 | } |
||
3046 | |||
3047 | if (__kgem_throttle_retire(kgem, flags)) |
||
3048 | goto retry_large; |
||
3049 | |||
3050 | return NULL; |
||
3051 | } |
||
3052 | |||
3053 | if (!use_active && list_is_empty(inactive(kgem, num_pages))) { |
||
3054 | DBG(("%s: inactive and cache bucket empty\n", |
||
3055 | __FUNCTION__)); |
||
3056 | |||
3057 | if (flags & CREATE_NO_RETIRE) { |
||
3058 | DBG(("%s: can not retire\n", __FUNCTION__)); |
||
3059 | return NULL; |
||
3060 | } |
||
3061 | |||
3062 | if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) { |
||
3063 | DBG(("%s: active cache bucket empty\n", __FUNCTION__)); |
||
3064 | return NULL; |
||
3065 | } |
||
3066 | |||
3067 | if (!__kgem_throttle_retire(kgem, flags)) { |
||
3068 | DBG(("%s: nothing retired\n", __FUNCTION__)); |
||
3069 | return NULL; |
||
3070 | } |
||
3071 | |||
3072 | if (list_is_empty(inactive(kgem, num_pages))) { |
||
3073 | DBG(("%s: active cache bucket still empty after retire\n", |
||
3074 | __FUNCTION__)); |
||
3075 | return NULL; |
||
3076 | } |
||
3077 | } |
||
3078 | |||
3079 | if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { |
||
3080 | int for_cpu = !!(flags & CREATE_CPU_MAP); |
||
3081 | DBG(("%s: searching for inactive %s map\n", |
||
3082 | __FUNCTION__, for_cpu ? "cpu" : "gtt")); |
||
3083 | cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)]; |
||
3084 | list_for_each_entry(bo, cache, vma) { |
||
3085 | assert(IS_CPU_MAP(bo->map) == for_cpu); |
||
3086 | assert(bucket(bo) == cache_bucket(num_pages)); |
||
3087 | assert(bo->proxy == NULL); |
||
3088 | assert(bo->rq == NULL); |
||
3089 | assert(bo->exec == NULL); |
||
3090 | assert(!bo->scanout); |
||
3091 | |||
3092 | if (num_pages > num_pages(bo)) { |
||
3093 | DBG(("inactive too small: %d < %d\n", |
||
3094 | num_pages(bo), num_pages)); |
||
3095 | continue; |
||
3096 | } |
||
3097 | |||
3098 | if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) { |
||
3099 | kgem_bo_free(kgem, bo); |
||
3100 | break; |
||
3101 | } |
||
3102 | |||
3103 | if (I915_TILING_NONE != bo->tiling && |
||
3104 | !gem_set_tiling(kgem->fd, bo->handle, |
||
3105 | I915_TILING_NONE, 0)) |
||
3106 | continue; |
||
3107 | |||
3108 | kgem_bo_remove_from_inactive(kgem, bo); |
||
3109 | |||
3110 | bo->tiling = I915_TILING_NONE; |
||
3111 | bo->pitch = 0; |
||
3112 | bo->delta = 0; |
||
3113 | DBG((" %s: found handle=%d (num_pages=%d) in linear vma cache\n", |
||
3114 | __FUNCTION__, bo->handle, num_pages(bo))); |
||
3115 | assert(use_active || bo->domain != DOMAIN_GPU); |
||
3116 | assert(!bo->needs_flush); |
||
3117 | assert_tiling(kgem, bo); |
||
3118 | ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active); |
||
3119 | return bo; |
||
3120 | } |
||
3121 | |||
3122 | if (flags & CREATE_EXACT) |
||
3123 | return NULL; |
||
3124 | |||
3125 | if (flags & CREATE_CPU_MAP && !kgem->has_llc) |
||
3126 | return NULL; |
||
3127 | } |
||
3128 | |||
3129 | cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages); |
||
3130 | list_for_each_entry(bo, cache, list) { |
||
3131 | assert(bo->refcnt == 0); |
||
3132 | assert(bo->reusable); |
||
3133 | assert(!!bo->rq == !!use_active); |
||
3134 | assert(bo->proxy == NULL); |
||
3135 | assert(!bo->scanout); |
||
3136 | |||
3137 | if (num_pages > num_pages(bo)) |
||
3138 | continue; |
||
3139 | |||
3140 | if (use_active && |
||
3141 | kgem->gen <= 040 && |
||
3142 | bo->tiling != I915_TILING_NONE) |
||
3143 | continue; |
||
3144 | |||
3145 | if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) { |
||
3146 | kgem_bo_free(kgem, bo); |
||
3147 | break; |
||
3148 | } |
||
3149 | |||
3150 | if (I915_TILING_NONE != bo->tiling) { |
||
3151 | if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) |
||
3152 | continue; |
||
3153 | |||
3154 | if (first) |
||
3155 | continue; |
||
3156 | |||
3157 | if (!gem_set_tiling(kgem->fd, bo->handle, |
||
3158 | I915_TILING_NONE, 0)) |
||
3159 | continue; |
||
3160 | |||
3161 | bo->tiling = I915_TILING_NONE; |
||
3162 | bo->pitch = 0; |
||
3163 | } |
||
3164 | |||
3165 | if (bo->map) { |
||
3166 | if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { |
||
3167 | int for_cpu = !!(flags & CREATE_CPU_MAP); |
||
3168 | if (IS_CPU_MAP(bo->map) != for_cpu) { |
||
3169 | if (first != NULL) |
||
3170 | break; |
||
3171 | |||
3172 | first = bo; |
||
3173 | continue; |
||
3174 | } |
||
3175 | } else { |
||
3176 | if (first != NULL) |
||
3177 | break; |
||
3178 | |||
3179 | first = bo; |
||
3180 | continue; |
||
3181 | } |
||
3182 | } else { |
||
3183 | if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { |
||
3184 | if (first != NULL) |
||
3185 | break; |
||
3186 | |||
3187 | first = bo; |
||
3188 | continue; |
||
3189 | } |
||
3190 | } |
||
3191 | |||
3192 | if (use_active) |
||
3193 | kgem_bo_remove_from_active(kgem, bo); |
||
3194 | else |
||
3195 | kgem_bo_remove_from_inactive(kgem, bo); |
||
3196 | |||
3197 | assert(bo->tiling == I915_TILING_NONE); |
||
3198 | bo->pitch = 0; |
||
3199 | bo->delta = 0; |
||
3200 | DBG((" %s: found handle=%d (num_pages=%d) in linear %s cache\n", |
||
3201 | __FUNCTION__, bo->handle, num_pages(bo), |
||
3202 | use_active ? "active" : "inactive")); |
||
3203 | assert(list_is_empty(&bo->list)); |
||
3204 | assert(use_active || bo->domain != DOMAIN_GPU); |
||
3205 | assert(!bo->needs_flush || use_active); |
||
3206 | assert_tiling(kgem, bo); |
||
3207 | ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active); |
||
3208 | return bo; |
||
3209 | } |
||
3210 | |||
3211 | if (first) { |
||
3212 | assert(first->tiling == I915_TILING_NONE); |
||
3213 | |||
3214 | if (use_active) |
||
3215 | kgem_bo_remove_from_active(kgem, first); |
||
3216 | else |
||
3217 | kgem_bo_remove_from_inactive(kgem, first); |
||
3218 | |||
3219 | first->pitch = 0; |
||
3220 | first->delta = 0; |
||
3221 | DBG((" %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n", |
||
3222 | __FUNCTION__, first->handle, num_pages(first), |
||
3223 | use_active ? "active" : "inactive")); |
||
3224 | assert(list_is_empty(&first->list)); |
||
3225 | assert(use_active || first->domain != DOMAIN_GPU); |
||
3226 | assert(!first->needs_flush || use_active); |
||
3227 | ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active); |
||
3228 | return first; |
||
3229 | } |
||
3230 | |||
3231 | return NULL; |
||
3232 | } |
||
3233 | |||
3234 | |||
3235 | struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags) |
||
3236 | { |
||
3237 | struct kgem_bo *bo; |
||
3238 | uint32_t handle; |
||
3239 | |||
3240 | DBG(("%s(%d)\n", __FUNCTION__, size)); |
||
3241 | assert(size); |
||
3242 | |||
3243 | if (flags & CREATE_GTT_MAP && kgem->has_llc) { |
||
3244 | flags &= ~CREATE_GTT_MAP; |
||
3245 | flags |= CREATE_CPU_MAP; |
||
3246 | } |
||
3247 | |||
3248 | size = NUM_PAGES(size); |
||
3249 | bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags); |
||
3250 | if (bo) { |
||
3251 | assert(bo->domain != DOMAIN_GPU); |
||
3252 | ASSERT_IDLE(kgem, bo->handle); |
||
3253 | bo->refcnt = 1; |
||
3254 | return bo; |
||
3255 | } |
||
3256 | |||
3257 | if (flags & CREATE_CACHED) |
||
3258 | return NULL; |
||
3259 | |||
3260 | handle = gem_create(kgem->fd, size); |
||
3261 | if (handle == 0) |
||
3262 | return NULL; |
||
3263 | |||
3264 | DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size)); |
||
3265 | bo = __kgem_bo_alloc(handle, size); |
||
3266 | if (bo == NULL) { |
||
3267 | gem_close(kgem->fd, handle); |
||
3268 | return NULL; |
||
3269 | } |
||
3270 | |||
3271 | debug_alloc__bo(kgem, bo); |
||
3272 | return bo; |
||
3273 | } |
||
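/* Hypothetical usage: try to satisfy a 64 KiB scratch allocation from
 * the inactive cache first, then fall back to a fresh object:
 *
 *	struct kgem_bo *scratch;
 *
 *	scratch = kgem_create_linear(kgem, 64*1024, CREATE_CACHED);
 *	if (scratch == NULL)	// cache miss: CREATE_CACHED never allocates
 *		scratch = kgem_create_linear(kgem, 64*1024, 0);
 */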
3274 | |||
3275 | inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo) |
||
3276 | { |
||
3277 | unsigned int size; |
||
3278 | |||
3279 | assert(bo->tiling); |
||
3280 | assert_tiling(kgem, bo); |
||
3281 | assert(kgem->gen < 040); |
||
3282 | |||
3283 | if (kgem->gen < 030) |
||
3284 | size = 512 * 1024; |
||
3285 | else |
||
3286 | size = 1024 * 1024; |
||
3287 | while (size < bytes(bo)) |
||
3288 | size *= 2; |
||
3289 | |||
3290 | return size; |
||
3291 | } |
||
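/* Pre-gen4 fences must cover a power-of-two region, per the doubling
 * loop above: e.g. on gen3 a 1.5 MiB tiled bo needs a 2 MiB fence
 * (1 MiB doubled once), wasting a quarter of the aperture range it
 * claims; gen2 starts the doubling at 512 KiB instead.
 */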
3292 | |||
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags)
{
	struct list *cache;
	struct kgem_bo *bo;
	uint32_t pitch, tiled_height, size;
	uint32_t handle;
	int i, bucket, retry;
	bool exact = flags & (CREATE_EXACT | CREATE_SCANOUT);

	if (tiling < 0)
		exact = true, tiling = -tiling;

	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
	     width, height, bpp, tiling, exact,
	     !!(flags & CREATE_INACTIVE),
	     !!(flags & CREATE_CPU_MAP),
	     !!(flags & CREATE_GTT_MAP),
	     !!(flags & CREATE_SCANOUT),
	     !!(flags & CREATE_PRIME),
	     !!(flags & CREATE_TEMPORARY)));

	size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
				 width, height, bpp, tiling, &pitch);
	assert(size && size <= kgem->max_object_size);
	size /= PAGE_SIZE;
	bucket = cache_bucket(size);

	if (flags & CREATE_SCANOUT) {
		struct kgem_bo *last = NULL;

		list_for_each_entry_reverse(bo, &kgem->scanout, list) {
			assert(bo->scanout);
			assert(bo->delta);
			assert(!bo->flush);
			assert_tiling(kgem, bo);

			if (size > num_pages(bo) || num_pages(bo) > 2*size)
				continue;

			if (bo->tiling != tiling ||
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
				if (!gem_set_tiling(kgem->fd, bo->handle,
						    tiling, pitch))
					continue;

				bo->tiling = tiling;
				bo->pitch = pitch;
			}

			if (flags & CREATE_INACTIVE && bo->rq) {
				last = bo;
				continue;
			}

			list_del(&bo->list);

			bo->unique_id = kgem_get_unique_id(kgem);
			DBG((" 1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			assert_tiling(kgem, bo);
			bo->refcnt = 1;
			return bo;
		}

		if (last) {
			list_del(&last->list);

			last->unique_id = kgem_get_unique_id(kgem);
			DBG((" 1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     last->pitch, last->tiling, last->handle, last->unique_id));
			assert(last->pitch*kgem_aligned_height(kgem, height, last->tiling) <= kgem_bo_size(last));
			assert_tiling(kgem, last);
			last->refcnt = 1;
			return last;
		}

		bo = NULL; //__kgem_bo_create_as_display(kgem, size, tiling, pitch);
		if (bo)
			return bo;
	}

	if (bucket >= NUM_CACHE_BUCKETS) {
		DBG(("%s: large bo num pages=%d, bucket=%d\n",
		     __FUNCTION__, size, bucket));

		if (flags & CREATE_INACTIVE)
			goto large_inactive;

		tiled_height = kgem_aligned_height(kgem, height, tiling);

		list_for_each_entry(bo, &kgem->large, list) {
			assert(!bo->purged);
			assert(!bo->scanout);
			assert(bo->refcnt == 0);
			assert(bo->reusable);
			assert_tiling(kgem, bo);

			if (kgem->gen < 040) {
				if (bo->pitch < pitch) {
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
					     bo->tiling, tiling,
					     bo->pitch, pitch));
					continue;
				}

				if (bo->pitch * tiled_height > bytes(bo))
					continue;
			} else {
				if (num_pages(bo) < size)
					continue;

				if (bo->pitch != pitch || bo->tiling != tiling) {
					if (!gem_set_tiling(kgem->fd, bo->handle,
							    tiling, pitch))
						continue;

					bo->pitch = pitch;
					bo->tiling = tiling;
				}
			}

			kgem_bo_remove_from_active(kgem, bo);

			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			assert_tiling(kgem, bo);
			bo->refcnt = 1;
			bo->flush = true;
			return bo;
		}

large_inactive:
		__kgem_throttle_retire(kgem, flags);
		list_for_each_entry(bo, &kgem->large_inactive, list) {
			assert(bo->refcnt == 0);
			assert(bo->reusable);
			assert(!bo->scanout);
			assert_tiling(kgem, bo);

			if (size > num_pages(bo))
				continue;

			if (bo->tiling != tiling ||
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
				if (!gem_set_tiling(kgem->fd, bo->handle,
						    tiling, pitch))
					continue;

				bo->tiling = tiling;
				bo->pitch = pitch;
			}

			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
				kgem_bo_free(kgem, bo);
				break;
			}

			list_del(&bo->list);

			assert(bo->domain != DOMAIN_GPU);
			bo->unique_id = kgem_get_unique_id(kgem);
			bo->pitch = pitch;
			bo->delta = 0;
			DBG((" 1:from large inactive: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			assert_tiling(kgem, bo);
			bo->refcnt = 1;
			return bo;
		}

		goto create;
	}

	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
		int for_cpu = !!(flags & CREATE_CPU_MAP);
		if (kgem->has_llc && tiling == I915_TILING_NONE)
			for_cpu = 1;
		/* We presume that we will need to upload to this bo,
		 * and so would prefer to have an active VMA.
		 */
		cache = &kgem->vma[for_cpu].inactive[bucket];
		do {
			list_for_each_entry(bo, cache, vma) {
				assert(bucket(bo) == bucket);
				assert(bo->refcnt == 0);
				assert(!bo->scanout);
				assert(bo->map);
				assert(IS_CPU_MAP(bo->map) == for_cpu);
				assert(bo->rq == NULL);
				assert(list_is_empty(&bo->request));
				assert(bo->flush == false);
				assert_tiling(kgem, bo);

				if (size > num_pages(bo)) {
					DBG(("inactive too small: %d < %d\n",
					     num_pages(bo), size));
					continue;
				}

				if (bo->tiling != tiling ||
				    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
					DBG(("inactive vma with wrong tiling: %d < %d\n",
					     bo->tiling, tiling));
					continue;
				}

				if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
					kgem_bo_free(kgem, bo);
					break;
				}

				assert(bo->tiling == tiling);
				bo->pitch = pitch;
				bo->delta = 0;
				bo->unique_id = kgem_get_unique_id(kgem);
				bo->domain = DOMAIN_NONE;

				kgem_bo_remove_from_inactive(kgem, bo);

				DBG((" from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n",
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
				assert(bo->reusable);
				assert(bo->domain != DOMAIN_GPU);
				ASSERT_IDLE(kgem, bo->handle);
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
				assert_tiling(kgem, bo);
				bo->refcnt = 1;
				return bo;
			}
		} while (!list_is_empty(cache) &&
			 __kgem_throttle_retire(kgem, flags));

		if (flags & CREATE_CPU_MAP && !kgem->has_llc) {
			if (list_is_empty(&kgem->active[bucket][tiling]) &&
			    list_is_empty(&kgem->inactive[bucket]))
				flags &= ~CREATE_CACHED;

			goto create;
		}
	}

	if (flags & CREATE_INACTIVE)
		goto skip_active_search;

	/* Best active match */
	retry = NUM_CACHE_BUCKETS - bucket;
	if (retry > 3 && (flags & CREATE_TEMPORARY) == 0)
		retry = 3;
search_again:
	assert(bucket < NUM_CACHE_BUCKETS);
	cache = &kgem->active[bucket][tiling];
	if (tiling) {
		tiled_height = kgem_aligned_height(kgem, height, tiling);
		list_for_each_entry(bo, cache, list) {
			assert(!bo->purged);
			assert(bo->refcnt == 0);
			assert(bucket(bo) == bucket);
			assert(bo->reusable);
			assert(bo->tiling == tiling);
			assert(bo->flush == false);
			assert(!bo->scanout);
			assert_tiling(kgem, bo);

			if (kgem->gen < 040) {
				if (bo->pitch < pitch) {
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
					     bo->tiling, tiling,
					     bo->pitch, pitch));
					continue;
				}

				if (bo->pitch * tiled_height > bytes(bo))
					continue;
			} else {
				if (num_pages(bo) < size)
					continue;

				if (bo->pitch != pitch) {
					if (!gem_set_tiling(kgem->fd,
							    bo->handle,
							    tiling, pitch))
						continue;

					bo->pitch = pitch;
				}
			}

			kgem_bo_remove_from_active(kgem, bo);

			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			assert_tiling(kgem, bo);
			bo->refcnt = 1;
			return bo;
		}
	} else {
		list_for_each_entry(bo, cache, list) {
			assert(bucket(bo) == bucket);
			assert(!bo->purged);
			assert(bo->refcnt == 0);
			assert(bo->reusable);
			assert(!bo->scanout);
			assert(bo->tiling == tiling);
			assert(bo->flush == false);
			assert_tiling(kgem, bo);

			if (num_pages(bo) < size)
				continue;

			kgem_bo_remove_from_active(kgem, bo);

			bo->pitch = pitch;
			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			assert_tiling(kgem, bo);
			bo->refcnt = 1;
			return bo;
		}
	}

	if (--retry && exact) {
		if (kgem->gen >= 040) {
			for (i = I915_TILING_NONE; i <= I915_TILING_Y; i++) {
				if (i == tiling)
					continue;

				cache = &kgem->active[bucket][i];
				list_for_each_entry(bo, cache, list) {
					assert(!bo->purged);
					assert(bo->refcnt == 0);
					assert(bo->reusable);
					assert(!bo->scanout);
					assert(bo->flush == false);
					assert_tiling(kgem, bo);

					if (num_pages(bo) < size)
						continue;

					if (!gem_set_tiling(kgem->fd,
							    bo->handle,
							    tiling, pitch))
						continue;

					kgem_bo_remove_from_active(kgem, bo);

					bo->unique_id = kgem_get_unique_id(kgem);
					bo->pitch = pitch;
					bo->tiling = tiling;
					bo->delta = 0;
					DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
					assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
					assert_tiling(kgem, bo);
					bo->refcnt = 1;
					return bo;
				}
			}
		}

		bucket++;
		goto search_again;
	}

	if (!exact) { /* allow an active near-miss? */
		i = tiling;
		while (--i >= 0) {
			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
							 width, height, bpp, tiling, &pitch);
			cache = active(kgem, tiled_height / PAGE_SIZE, i);
			tiled_height = kgem_aligned_height(kgem, height, i);
			list_for_each_entry(bo, cache, list) {
				assert(!bo->purged);
				assert(bo->refcnt == 0);
				assert(bo->reusable);
				assert(!bo->scanout);
				assert(bo->flush == false);
				assert_tiling(kgem, bo);

				if (bo->tiling) {
					if (bo->pitch < pitch) {
						DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
						     bo->tiling, tiling,
						     bo->pitch, pitch));
						continue;
					}
				} else
					bo->pitch = pitch;

				if (bo->pitch * tiled_height > bytes(bo))
					continue;

				kgem_bo_remove_from_active(kgem, bo);

				bo->unique_id = kgem_get_unique_id(kgem);
				bo->delta = 0;
				DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
				assert_tiling(kgem, bo);
				bo->refcnt = 1;
				return bo;
			}
		}
	}

skip_active_search:
	bucket = cache_bucket(size);
	retry = NUM_CACHE_BUCKETS - bucket;
	if (retry > 3)
		retry = 3;
search_inactive:
	/* Now just look for a close match and prefer any currently active */
	assert(bucket < NUM_CACHE_BUCKETS);
	cache = &kgem->inactive[bucket];
	list_for_each_entry(bo, cache, list) {
		assert(bucket(bo) == bucket);
		assert(bo->reusable);
		assert(!bo->scanout);
		assert(bo->flush == false);
		assert_tiling(kgem, bo);

		if (size > num_pages(bo)) {
			DBG(("inactive too small: %d < %d\n",
			     num_pages(bo), size));
			continue;
		}

		if (bo->tiling != tiling ||
		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
			if (!gem_set_tiling(kgem->fd, bo->handle,
					    tiling, pitch))
				continue;

			if (bo->map)
				kgem_bo_release_map(kgem, bo);
		}

		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
			kgem_bo_free(kgem, bo);
			break;
		}

		kgem_bo_remove_from_inactive(kgem, bo);

		bo->pitch = pitch;
		bo->tiling = tiling;

		bo->delta = 0;
		bo->unique_id = kgem_get_unique_id(kgem);
		assert(bo->pitch);
		DBG((" from inactive: pitch=%d, tiling=%d: handle=%d, id=%d\n",
		     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
		assert(bo->refcnt == 0);
		assert(bo->reusable);
		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
		ASSERT_MAYBE_IDLE(kgem, bo->handle, flags & CREATE_INACTIVE);
		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
		assert_tiling(kgem, bo);
		bo->refcnt = 1;
		return bo;
	}

	if (flags & CREATE_INACTIVE &&
	    !list_is_empty(&kgem->active[bucket][tiling]) &&
	    __kgem_throttle_retire(kgem, flags)) {
		flags &= ~CREATE_INACTIVE;
		goto search_inactive;
	}

	if (--retry) {
		bucket++;
		flags &= ~CREATE_INACTIVE;
		goto search_inactive;
	}

create:
	if (flags & CREATE_CACHED)
		return NULL;

	if (bucket >= NUM_CACHE_BUCKETS)
		size = ALIGN(size, 1024);
	handle = gem_create(kgem->fd, size);
	if (handle == 0)
		return NULL;

	bo = __kgem_bo_alloc(handle, size);
	if (!bo) {
		gem_close(kgem->fd, handle);
		return NULL;
	}

	if (bucket >= NUM_CACHE_BUCKETS) {
		DBG(("%s: marking large bo for automatic flushing\n",
		     __FUNCTION__));
		bo->flush = true;
	}

	bo->unique_id = kgem_get_unique_id(kgem);
	if (tiling == I915_TILING_NONE ||
	    gem_set_tiling(kgem->fd, handle, tiling, pitch)) {
		bo->tiling = tiling;
		bo->pitch = pitch;
	} else {
		if (flags & CREATE_EXACT) {
			if (bo->pitch != pitch || bo->tiling != tiling) {
				kgem_bo_free(kgem, bo);
				return NULL;
			}
		}
	}

	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
	assert_tiling(kgem, bo);

	debug_alloc__bo(kgem, bo);

	DBG((" new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
	     size, num_pages(bo), bucket(bo)));
	return bo;
}

#if 0
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags)
{
	struct kgem_bo *bo;
	int stride, size;

	if (DBG_NO_CPU)
		return NULL;

	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));

	if (kgem->has_llc) {
		bo = kgem_create_2d(kgem, width, height, bpp,
				    I915_TILING_NONE, flags);
		if (bo == NULL)
			return bo;

		assert(bo->tiling == I915_TILING_NONE);
		assert_tiling(kgem, bo);

		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}

		return bo;
	}

	assert(width > 0 && height > 0);
	stride = ALIGN(width, 2) * bpp >> 3;
	stride = ALIGN(stride, 4);
	size = stride * ALIGN(height, 2);
	assert(size >= PAGE_SIZE);

	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
	     __FUNCTION__, width, height, bpp, stride));

	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
	if (bo) {
		assert(bo->tiling == I915_TILING_NONE);
		assert_tiling(kgem, bo);
		assert(bo->snoop);
		bo->refcnt = 1;
		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	if (kgem->has_caching) {
		bo = kgem_create_linear(kgem, size, flags);
		if (bo == NULL)
			return NULL;

		assert(bo->tiling == I915_TILING_NONE);
		assert_tiling(kgem, bo);

		if (!gem_set_caching(kgem->fd, bo->handle, SNOOPED)) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}
		bo->snoop = true;

		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}

		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	if (kgem->has_userptr) {
		void *ptr;

		/* XXX */
		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
			return NULL;

		bo = kgem_create_map(kgem, ptr, size, false);
		if (bo == NULL) {
			free(ptr);
			return NULL;
		}

		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	return NULL;
}
#endif

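/* Drop a reference to the bo. Proxies are simply unlinked and release
 * their parent; real objects are passed to __kgem_bo_destroy(), which
 * decides whether to cache or free them.
 */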
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, proxy? %d\n",
	     __FUNCTION__, bo->handle, bo->proxy != NULL));

	if (bo->proxy) {
		_list_del(&bo->vma);
		_list_del(&bo->request);
		if (bo->io && bo->exec == NULL)
			_kgem_bo_delete_buffer(kgem, bo);
		kgem_bo_unref(kgem, bo->proxy);
		kgem_bo_binding_free(kgem, bo);
		free(bo);
		return;
	}

	__kgem_bo_destroy(kgem, bo);
}

static void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->rq);
	assert(bo->exec == NULL);
	assert(bo->needs_flush);

	/* The kernel will emit a flush *and* update its own flushing lists. */
	if (!__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	DBG(("%s: handle=%d, busy?=%d\n",
	     __FUNCTION__, bo->handle, bo->rq != NULL));
}

void kgem_scanout_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_submit(kgem, bo);
	if (!bo->needs_flush)
		return;

	/* If the kernel fails to emit the flush, then it will be forced when
	 * we assume direct access. And as the usual failure is EIO, we do
	 * not actually care.
	 */
	assert(bo->exec == NULL);
	if (bo->rq)
		__kgem_flush(kgem, bo);

	/* Whatever actually happens, we can regard the GTT write domain
	 * as being flushed.
	 */
	bo->gtt_dirty = false;
	bo->needs_flush = false;
	bo->domain = DOMAIN_NONE;
}

inline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
{
	return kgem->nreloc && bo->rq && RQ_RING(bo->rq) != kgem->ring;
}

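/* Check whether the NULL-terminated list of bo can be added to the
 * current batch without exceeding the aperture watermarks or the
 * available exec slots, and without stalling on a ring switch.
 */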
bool kgem_check_bo(struct kgem *kgem, ...)
{
	va_list ap;
	struct kgem_bo *bo;
	int num_exec = 0;
	int num_pages = 0;
	bool flush = false;

	va_start(ap, kgem);
	while ((bo = va_arg(ap, struct kgem_bo *))) {
		while (bo->proxy)
			bo = bo->proxy;
		if (bo->exec)
			continue;

		if (needs_semaphore(kgem, bo))
			return false;

		num_pages += num_pages(bo);
		num_exec++;

		flush |= bo->flush;
	}
	va_end(ap);

	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
	     __FUNCTION__, num_pages, num_exec));

	if (!num_pages)
		return true;

	if (kgem_flush(kgem, flush))
		return false;

	if (kgem->aperture > kgem->aperture_low &&
	    kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: current aperture usage (%d) is greater than low water mark (%d)\n",
		     __FUNCTION__, kgem->aperture, kgem->aperture_low));
		return false;
	}

	if (num_pages + kgem->aperture > kgem->aperture_high) {
		DBG(("%s: final aperture usage (%d) is greater than high water mark (%d)\n",
		     __FUNCTION__, num_pages + kgem->aperture, kgem->aperture_high));
		return false;
	}

	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) {
		DBG(("%s: out of exec slots (%d + %d / %d)\n", __FUNCTION__,
		     kgem->nexec, num_exec, KGEM_EXEC_SIZE(kgem)));
		return false;
	}

	return true;
}

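/* Append a relocation entry for the batch dword at pos, resolving any
 * proxy chain to the real bo, reserving a fence on pre-gen4 when
 * requested, and marking the target as dirty if it will be written.
 * Returns the presumed offset of the target plus delta.
 */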
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domain,
			uint32_t delta)
{
	int index;

	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));

	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);

	if (bo != NULL && bo->handle == -2) {
		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);

		if (read_write_domain & 0x7fff && !bo->gpu_dirty)
			__kgem_bo_mark_dirty(bo);

		return 0;
	}

	index = kgem->nreloc++;
	assert(index < ARRAY_SIZE(kgem->reloc));
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
	if (bo) {
		assert(bo->refcnt);
		while (bo->proxy) {
			DBG(("%s: adding proxy [delta=%d] for handle=%d\n",
			     __FUNCTION__, bo->delta, bo->handle));
			delta += bo->delta;
			assert(bo->handle == bo->proxy->handle);
			/* need to release the cache upon batch submit */
			if (bo->exec == NULL) {
				list_move_tail(&bo->request,
					       &kgem->next_request->buffers);
				bo->rq = MAKE_REQUEST(kgem->next_request,
						      kgem->ring);
				bo->exec = &_kgem_dummy_exec;
			}

			if (read_write_domain & 0x7fff && !bo->gpu_dirty)
				__kgem_bo_mark_dirty(bo);

			bo = bo->proxy;
			assert(bo->refcnt);
		}
		assert(bo->refcnt);

		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);
		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
		assert(RQ_RING(bo->rq) == kgem->ring);

		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
			if (bo->tiling &&
			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
				assert(kgem->nfence < kgem->fence_max);
				kgem->aperture_fenced +=
					kgem_bo_fenced_size(kgem, bo);
				kgem->nfence++;
			}
			bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
		}

		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = bo->target_handle;
		kgem->reloc[index].presumed_offset = bo->presumed_offset;

		if (read_write_domain & 0x7fff && !bo->gpu_dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}

		delta += bo->presumed_offset;
	} else {
		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = ~0U;
		kgem->reloc[index].presumed_offset = 0;
		if (kgem->nreloc__self < 256)
			kgem->reloc__self[kgem->nreloc__self++] = index;
	}
	kgem->reloc[index].read_domains = read_write_domain >> 16;
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;

	return delta;
}

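/* Evict inactive mappings to keep the number of vma we hold open per
 * process within a conservative bound; any bo whose backing store can
 * no longer be marked purgeable is freed outright.
 */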
static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
{
	int i, j;

	DBG(("%s: type=%d, count=%d (bucket: %d)\n",
	     __FUNCTION__, type, kgem->vma[type].count, bucket));
	if (kgem->vma[type].count <= 0)
		return;

	if (kgem->need_purge)
		kgem_purge_cache(kgem);

	/* vma are limited on a per-process basis to around 64k.
	 * This includes all malloc arenas as well as other file
	 * mappings. In order to be fair and not hog the cache,
	 * and more importantly not to exhaust that limit and to
	 * start failing mappings, we keep our own number of open
	 * vma to within a conservative value.
	 */
	i = 0;
	while (kgem->vma[type].count > 0) {
		struct kgem_bo *bo = NULL;

		for (j = 0;
		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
		     j++) {
			struct list *head = &kgem->vma[type].inactive[i++%ARRAY_SIZE(kgem->vma[type].inactive)];
			if (!list_is_empty(head))
				bo = list_last_entry(head, struct kgem_bo, vma);
		}
		if (bo == NULL)
			break;

		DBG(("%s: discarding inactive %s vma cache for %d\n",
		     __FUNCTION__,
		     IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle));
		assert(IS_CPU_MAP(bo->map) == type);
		assert(bo->map);
		assert(bo->rq == NULL);

		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
//		munmap(MAP(bo->map), bytes(bo));
		bo->map = NULL;
		list_del(&bo->vma);
		kgem->vma[type].count--;

		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo)) {
			DBG(("%s: freeing unpurgeable old mapping\n",
			     __FUNCTION__));
			kgem_bo_free(kgem, bo);
		}
	}
}

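/* Map a bo without enforcing a domain transition: prefer a (cached) CPU
 * mapping on LLC systems, otherwise reuse or create a GTT mapping. No
 * synchronisation against outstanding GPU writes is performed.
 */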
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(bo->proxy == NULL);
	assert(list_is_empty(&bo->list));
	assert(!IS_USER_MAP(bo->map));
	assert_tiling(kgem, bo);

	if (bo->tiling == I915_TILING_NONE && !bo->scanout && kgem->has_llc) {
		DBG(("%s: converting request for GTT map into CPU map\n",
		     __FUNCTION__));
		return kgem_bo_map__cpu(kgem, bo);
	}

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	return ptr;
}

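/* Map a bo for immediate access, synchronising against the GPU: the bo
 * is moved into the CPU domain on LLC systems (for untiled buffers) or
 * into the GTT domain otherwise.
 */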
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(bo->proxy == NULL);
	assert(list_is_empty(&bo->list));
	assert(!IS_USER_MAP(bo->map));
	assert(bo->exec == NULL);
	assert_tiling(kgem, bo);

	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
		DBG(("%s: converting request for GTT map into CPU map\n",
		     __FUNCTION__));
		ptr = kgem_bo_map__cpu(kgem, bo);
		if (ptr)
			kgem_bo_sync__cpu(kgem, bo);
		return ptr;
	}

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	if (bo->domain != DOMAIN_GTT || FORCE_MMAP_SYNC & (1 << DOMAIN_GTT)) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		/* XXX use PROT_READ to avoid the write flush? */

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_GTT;
			bo->gtt_dirty = true;
		}
	}

	return ptr;
}

void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->list));
	assert(!IS_USER_MAP(bo->map));
	assert_tiling(kgem, bo);

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(bytes(bo) <= kgem->aperture_mappable / 4);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	return ptr;
}

void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->map)
		return MAP(bo->map);

	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
	return bo->map = __kgem_bo_map__gtt(kgem, bo);
}

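/* Return a CPU mapping for the bo, creating (and caching) one via
 * DRM_IOCTL_I915_GEM_MMAP if required. On failure, the caches are
 * flushed and the ioctl retried before giving up.
 */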
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap mmap_arg;

	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
	assert(!bo->purged);
	assert(list_is_empty(&bo->list));
	assert(bo->proxy == NULL);

	if (IS_CPU_MAP(bo->map))
		return MAP(bo->map);

	if (bo->map)
		kgem_bo_release_map(kgem, bo);

	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));

retry:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	mmap_arg.offset = 0;
	mmap_arg.size = bytes(bo);
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
		if (__kgem_throttle_retire(kgem, 0))
			goto retry;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry;
		}

		ErrorF("%s: failed to mmap handle=%d, %d bytes, into CPU domain\n",
		       __FUNCTION__, bo->handle, bytes(bo));
		return NULL;
	}

	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));

	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}

void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap mmap_arg;

	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
	assert(bo->refcnt);
	assert(!bo->purged);
	assert(list_is_empty(&bo->list));
	assert(bo->proxy == NULL);

	if (IS_CPU_MAP(bo->map))
		return MAP(bo->map);

retry:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	mmap_arg.offset = 0;
	mmap_arg.size = bytes(bo);
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
		int err = errno;

		assert(err != EINVAL);

		if (__kgem_throttle_retire(kgem, 0))
			goto retry;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry;
		}

		ErrorF("%s: failed to mmap handle=%d, %d bytes, into CPU domain: %d\n",
		       __FUNCTION__, bo->handle, bytes(bo), err);
		return NULL;
	}

	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));
	if (bo->map && bo->domain == DOMAIN_CPU) {
		DBG(("%s: discarding GTT vma for %d\n", __FUNCTION__, bo->handle));
		kgem_bo_release_map(kgem, bo);
	}
	if (bo->map == NULL) {
		DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
		bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
	}
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}

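/* Serialise CPU access: submit any batch referencing the bo and then
 * move it into the CPU domain so that subsequent reads and writes
 * through a CPU mapping are coherent.
 */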
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
	assert(!bo->scanout);
	kgem_bo_submit(kgem, bo);

	/* SHM pixmaps use proxies for subpage offsets */
	assert(!bo->purged);
	while (bo->proxy)
		bo = bo->proxy;
	assert(!bo->purged);

	if (bo->domain != DOMAIN_CPU || FORCE_MMAP_SYNC & (1 << DOMAIN_CPU)) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: SYNC: handle=%d, needs_flush? %d, domain? %d, busy? %d\n",
		     __FUNCTION__, bo->handle,
		     bo->needs_flush, bo->domain,
		     __kgem_busy(kgem, bo->handle)));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;

		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_CPU;
		}
	}
}

void kgem_clear_dirty(struct kgem *kgem)
{
	struct list * const buffers = &kgem->next_request->buffers;
	struct kgem_bo *bo;

	list_for_each_entry(bo, buffers, request) {
		if (!bo->gpu_dirty)
			break;

		bo->gpu_dirty = false;
	}
}

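/* Create a proxy bo describing the [offset, offset+length) range of
 * target. The proxy shares the target's handle, tiling and request
 * state, and holds a reference on the target for its lifetime.
 */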
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length)
{
	struct kgem_bo *bo;

	DBG(("%s: target handle=%d [proxy? %d], offset=%d, length=%d, io=%d\n",
	     __FUNCTION__, target->handle, target->proxy ? target->proxy->delta : -1,
	     offset, length, target->io));

	bo = __kgem_bo_alloc(target->handle, length);
	if (bo == NULL)
		return NULL;

	bo->unique_id = kgem_get_unique_id(kgem);
	bo->reusable = false;
	bo->size.bytes = length;

	bo->io = target->io && target->proxy == NULL;
	bo->gpu_dirty = target->gpu_dirty;
	bo->tiling = target->tiling;
	bo->pitch = target->pitch;
	bo->flush = target->flush;
	bo->snoop = target->snoop;

	assert(!bo->scanout);
	bo->proxy = kgem_bo_reference(target);
	bo->delta = offset;

	if (target->exec) {
		list_move_tail(&bo->request, &kgem->next_request->buffers);
		bo->exec = &_kgem_dummy_exec;
	}
	bo->rq = target->rq;

	return bo;
}

#if 0
static struct kgem_buffer *
buffer_alloc(void)
{
	struct kgem_buffer *bo;

	bo = malloc(sizeof(*bo));
	if (bo == NULL)
		return NULL;

	bo->mem = NULL;
	bo->need_io = false;
	bo->mmapped = true;

	return bo;
}

static struct kgem_buffer *
buffer_alloc_with_data(int num_pages)
{
	struct kgem_buffer *bo;

	bo = malloc(sizeof(*bo) + 2*UPLOAD_ALIGNMENT + num_pages * PAGE_SIZE);
	if (bo == NULL)
		return NULL;

	bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), UPLOAD_ALIGNMENT);
	bo->mmapped = false;
	return bo;
}

static inline bool
use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
{
	if ((flags & KGEM_BUFFER_WRITE) == 0)
		return kgem->gen >= 030;

	return true;
}

static void
init_buffer_from_bo(struct kgem_buffer *bo, struct kgem_bo *old)
{
	DBG(("%s: reusing handle=%d for buffer\n",
	     __FUNCTION__, old->handle));

	assert(old->proxy == NULL);

	memcpy(&bo->base, old, sizeof(*old));
	if (old->rq)
		list_replace(&old->request, &bo->base.request);
	else
		list_init(&bo->base.request);
	list_replace(&old->vma, &bo->base.vma);
	list_init(&bo->base.list);
	free(old);

	assert(bo->base.tiling == I915_TILING_NONE);

	bo->base.refcnt = 1;
}

static struct kgem_buffer *
search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
{
	struct kgem_buffer *bo;
	struct kgem_bo *old;

	old = search_snoop_cache(kgem, alloc, 0);
	if (old) {
		if (!old->io) {
			bo = buffer_alloc();
			if (bo == NULL)
				return NULL;

			init_buffer_from_bo(bo, old);
		} else {
			bo = (struct kgem_buffer *)old;
			bo->base.refcnt = 1;
		}

		DBG(("%s: created CPU handle=%d for buffer, size %d\n",
		     __FUNCTION__, bo->base.handle, num_pages(&bo->base)));

		assert(bo->base.snoop);
		assert(bo->base.tiling == I915_TILING_NONE);
		assert(num_pages(&bo->base) >= alloc);
		assert(bo->mmapped == true);
		assert(bo->need_io == false);

		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
		if (bo->mem == NULL) {
			bo->base.refcnt = 0;
			kgem_bo_free(kgem, &bo->base);
			bo = NULL;
		}

		return bo;
	}

	return NULL;
}

static struct kgem_buffer *
create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
{
	struct kgem_buffer *bo;
	uint32_t handle;

	if (kgem->has_llc) {
		struct kgem_bo *old;

		bo = buffer_alloc();
		if (bo == NULL)
			return NULL;

		old = search_linear_cache(kgem, alloc,
					  CREATE_INACTIVE | CREATE_CPU_MAP | CREATE_EXACT);
		if (old) {
			init_buffer_from_bo(bo, old);
		} else {
			handle = gem_create(kgem->fd, alloc);
			if (handle == 0) {
				free(bo);
				return NULL;
			}

			debug_alloc(kgem, alloc);
			__kgem_bo_init(&bo->base, handle, alloc);
			DBG(("%s: created CPU (LLC) handle=%d for buffer, size %d\n",
			     __FUNCTION__, bo->base.handle, alloc));
		}

		assert(bo->base.refcnt == 1);
		assert(bo->mmapped == true);
		assert(bo->need_io == false);

		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
		if (bo->mem != NULL)
			return bo;

		bo->base.refcnt = 0; /* for valgrind */
		kgem_bo_free(kgem, &bo->base);
	}

	if (kgem->has_caching) {
		struct kgem_bo *old;

		bo = buffer_alloc();
		if (bo == NULL)
			return NULL;

		old = search_linear_cache(kgem, alloc,
					  CREATE_INACTIVE | CREATE_CPU_MAP | CREATE_EXACT);
		if (old) {
			init_buffer_from_bo(bo, old);
		} else {
			handle = gem_create(kgem->fd, alloc);
			if (handle == 0) {
				free(bo);
				return NULL;
			}

			debug_alloc(kgem, alloc);
			__kgem_bo_init(&bo->base, handle, alloc);
			DBG(("%s: created CPU handle=%d for buffer, size %d\n",
			     __FUNCTION__, bo->base.handle, alloc));
		}

		assert(bo->base.refcnt == 1);
		assert(bo->mmapped == true);
		assert(bo->need_io == false);

		if (!gem_set_caching(kgem->fd, bo->base.handle, SNOOPED))
			goto free_caching;

		bo->base.snoop = true;

		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
		if (bo->mem == NULL)
			goto free_caching;

		return bo;

free_caching:
		bo->base.refcnt = 0; /* for valgrind */
		kgem_bo_free(kgem, &bo->base);
	}

	if (kgem->has_userptr) {
		bo = buffer_alloc();
		if (bo == NULL)
			return NULL;

		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
		if (posix_memalign(&bo->mem, PAGE_SIZE, alloc * PAGE_SIZE)) {
			free(bo);
			return NULL;
		}

		handle = gem_userptr(kgem->fd, bo->mem, alloc * PAGE_SIZE, false);
		if (handle == 0) {
			free(bo->mem);
			free(bo);
			return NULL;
		}

		debug_alloc(kgem, alloc);
		__kgem_bo_init(&bo->base, handle, alloc);
		DBG(("%s: created snoop handle=%d for buffer\n",
		     __FUNCTION__, bo->base.handle));

		assert(bo->mmapped == true);
		assert(bo->need_io == false);

		bo->base.refcnt = 1;
		bo->base.snoop = true;
		bo->base.map = MAKE_USER_MAP(bo->mem);

		return bo;
	}

	return NULL;
}

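/* Allocate size bytes of transient upload space, preferring to pack the
 * request into an existing upload buffer, then into a reused LLC,
 * snoopable or GTT-mappable bo, falling back to a pwrite-backed
 * allocation. The CPU address for the data is returned in *ret.
 */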
4778 | struct kgem_bo *kgem_create_buffer(struct kgem *kgem, |
||
4779 | uint32_t size, uint32_t flags, |
||
4780 | void **ret) |
||
4781 | { |
||
4782 | struct kgem_buffer *bo; |
||
4783 | unsigned offset, alloc; |
||
4784 | struct kgem_bo *old; |
||
4785 | |||
4786 | DBG(("%s: size=%d, flags=%x [write?=%d, inplace?=%d, last?=%d]\n", |
||
4787 | __FUNCTION__, size, flags, |
||
4788 | !!(flags & KGEM_BUFFER_WRITE), |
||
4789 | !!(flags & KGEM_BUFFER_INPLACE), |
||
4790 | !!(flags & KGEM_BUFFER_LAST))); |
||
4791 | assert(size); |
||
4792 | /* we should never be asked to create anything TOO large */ |
||
4793 | assert(size <= kgem->max_object_size); |
||
4794 | |||
4795 | #if !DBG_NO_UPLOAD_CACHE |
||
4796 | list_for_each_entry(bo, &kgem->batch_buffers, base.list) { |
||
4797 | assert(bo->base.io); |
||
4798 | assert(bo->base.refcnt >= 1); |
||
4799 | |||
4800 | /* We can reuse any write buffer which we can fit */ |
||
4801 | if (flags == KGEM_BUFFER_LAST && |
||
4802 | bo->write == KGEM_BUFFER_WRITE && |
||
4803 | bo->base.refcnt == 1 && !bo->mmapped && |
||
4804 | size <= bytes(&bo->base)) { |
||
4805 | DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n", |
||
4806 | __FUNCTION__, size, bo->used, bytes(&bo->base))); |
||
4807 | gem_write(kgem->fd, bo->base.handle, |
||
4808 | 0, bo->used, bo->mem); |
||
4809 | kgem_buffer_release(kgem, bo); |
||
4810 | bo->need_io = 0; |
||
4811 | bo->write = 0; |
||
4812 | offset = 0; |
||
4813 | bo->used = size; |
||
4814 | goto done; |
||
4815 | } |
||
4816 | |||
4817 | if (flags & KGEM_BUFFER_WRITE) { |
||
4818 | if ((bo->write & KGEM_BUFFER_WRITE) == 0 || |
||
4819 | (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) && |
||
4820 | !bo->base.snoop)) { |
||
4821 | DBG(("%s: skip write %x buffer, need %x\n", |
||
4822 | __FUNCTION__, bo->write, flags)); |
||
4823 | continue; |
||
4824 | } |
||
4825 | assert(bo->mmapped || bo->need_io); |
||
4826 | } else { |
||
4827 | if (bo->write & KGEM_BUFFER_WRITE) { |
||
4828 | DBG(("%s: skip write %x buffer, need %x\n", |
||
4829 | __FUNCTION__, bo->write, flags)); |
||
4830 | continue; |
||
4831 | } |
||
4832 | } |
||
4833 | |||
4834 | if (bo->used + size <= bytes(&bo->base)) { |
||
4835 | DBG(("%s: reusing buffer? used=%d + size=%d, total=%d\n", |
||
4836 | __FUNCTION__, bo->used, size, bytes(&bo->base))); |
||
4837 | offset = bo->used; |
||
4838 | bo->used += size; |
||
4839 | goto done; |
||
4840 | } |
||
4841 | } |
||
4842 | |||
4843 | if (flags & KGEM_BUFFER_WRITE) { |
||
4844 | list_for_each_entry(bo, &kgem->active_buffers, base.list) { |
||
4845 | assert(bo->base.io); |
||
4846 | assert(bo->base.refcnt >= 1); |
||
4847 | assert(bo->mmapped); |
||
4848 | assert(!IS_CPU_MAP(bo->base.map) || kgem->has_llc || bo->base.snoop); |
||
4849 | |||
4850 | if (!kgem->has_llc && (bo->write & ~flags) & KGEM_BUFFER_INPLACE) { |
||
4851 | DBG(("%s: skip write %x buffer, need %x\n", |
||
4852 | __FUNCTION__, bo->write, flags)); |
||
4853 | continue; |
||
4854 | } |
||
4855 | |||
4856 | if (bo->used + size <= bytes(&bo->base)) { |
||
4857 | DBG(("%s: reusing buffer? used=%d + size=%d, total=%d\n", |
||
4858 | __FUNCTION__, bo->used, size, bytes(&bo->base))); |
||
4859 | offset = bo->used; |
||
4860 | bo->used += size; |
||
4861 | list_move(&bo->base.list, &kgem->batch_buffers); |
||
4862 | goto done; |
||
4863 | } |
||
4864 | } |
||
4865 | } |
||
4866 | #endif |
||
4867 | |||
4868 | #if !DBG_NO_MAP_UPLOAD |
||
4869 | /* Be a little more generous and hope to hold fewer mmappings */ |
||
4870 | alloc = ALIGN(2*size, kgem->buffer_size); |
||
4871 | if (alloc > MAX_CACHE_SIZE) |
||
4872 | alloc = ALIGN(size, kgem->buffer_size); |
||
4873 | if (alloc > MAX_CACHE_SIZE) |
||
4874 | alloc = PAGE_ALIGN(size); |
||
4875 | assert(alloc); |
||
4876 | |||
4877 | if (alloc > kgem->aperture_mappable / 4) |
||
4878 | flags &= ~KGEM_BUFFER_INPLACE; |
||
4879 | alloc /= PAGE_SIZE; |
||
4880 | |||
4881 | if (kgem->has_llc && |
||
4882 | (flags & KGEM_BUFFER_WRITE_INPLACE) != KGEM_BUFFER_WRITE_INPLACE) { |
||
4883 | bo = buffer_alloc(); |
||
4884 | if (bo == NULL) |
||
4885 | goto skip_llc; |
||
4886 | |||
4887 | old = NULL; |
||
4888 | if ((flags & KGEM_BUFFER_WRITE) == 0) |
||
4889 | old = search_linear_cache(kgem, alloc, CREATE_CPU_MAP); |
||
4890 | if (old == NULL) |
||
4891 | old = search_linear_cache(kgem, alloc, CREATE_INACTIVE | CREATE_CPU_MAP); |
||
4892 | if (old == NULL) |
||
4893 | old = search_linear_cache(kgem, NUM_PAGES(size), CREATE_INACTIVE | CREATE_CPU_MAP); |
||
4894 | if (old) { |
||
4895 | DBG(("%s: found LLC handle=%d for buffer\n", |
||
4896 | __FUNCTION__, old->handle)); |
||
4897 | |||
4898 | init_buffer_from_bo(bo, old); |
||
4899 | } else { |
||
4900 | uint32_t handle = gem_create(kgem->fd, alloc); |
||
4901 | if (handle == 0) { |
||
4902 | free(bo); |
||
4903 | goto skip_llc; |
||
4904 | } |
||
4905 | __kgem_bo_init(&bo->base, handle, alloc); |
||
4906 | DBG(("%s: created LLC handle=%d for buffer\n", |
||
4907 | __FUNCTION__, bo->base.handle)); |
||
4908 | |||
4909 | debug_alloc(kgem, alloc); |
||
4910 | } |
||
4911 | |||
4912 | assert(bo->mmapped); |
||
4913 | assert(!bo->need_io); |
||
4914 | |||
4915 | bo->mem = kgem_bo_map__cpu(kgem, &bo->base); |
||
4916 | if (bo->mem) { |
||
4917 | if (flags & KGEM_BUFFER_WRITE) |
||
4918 | kgem_bo_sync__cpu(kgem, &bo->base); |
||
4919 | flags &= ~KGEM_BUFFER_INPLACE; |
||
4920 | goto init; |
||
4921 | } else { |
||
4922 | bo->base.refcnt = 0; /* for valgrind */ |
||
4923 | kgem_bo_free(kgem, &bo->base); |
||
4924 | } |
||
4925 | } |
||
4926 | skip_llc: |
||
4927 | |||
	if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE) {
		/* The issue with using a GTT upload buffer is that we may
		 * cause eviction-stalls in order to free up some GTT space.
		 * An is-mappable? ioctl could help us detect when we are
		 * about to block, or some per-page magic in the kernel.
		 *
		 * XXX This is especially noticeable on memory constrained
		 * devices like gen2 or with a relatively slow gpu like i3.
		 */
		DBG(("%s: searching for an inactive GTT map for upload\n",
		     __FUNCTION__));
		old = search_linear_cache(kgem, alloc,
					  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
#if HAVE_I915_GEM_BUFFER_INFO
		if (old) {
			struct drm_i915_gem_buffer_info info;

			/* An example of such a non-blocking ioctl might work */

			VG_CLEAR(info);
			info.handle = old->handle;
			if (drmIoctl(kgem->fd,
				     DRM_IOCTL_I915_GEM_BUFFER_INFO,
				     &info) == 0) {
				old->presumed_offset = info.addr;
				if ((info.flags & I915_GEM_MAPPABLE) == 0) {
					kgem_bo_move_to_inactive(kgem, old);
					old = NULL;
				}
			}
		}
#endif
		if (old == NULL)
			old = search_linear_cache(kgem, NUM_PAGES(size),
						  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
		if (old == NULL) {
			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
			if (old && !__kgem_bo_is_mappable(kgem, old)) {
				_kgem_bo_destroy(kgem, old);
				old = NULL;
			}
		}
		if (old) {
			DBG(("%s: reusing handle=%d for buffer\n",
			     __FUNCTION__, old->handle));
			assert(__kgem_bo_is_mappable(kgem, old));
			assert(!old->snoop);
			assert(old->rq == NULL);

			bo = buffer_alloc();
			if (bo == NULL)
				return NULL;

			init_buffer_from_bo(bo, old);
			assert(num_pages(&bo->base) >= NUM_PAGES(size));

			assert(bo->mmapped);
			assert(bo->base.refcnt == 1);

			bo->mem = kgem_bo_map(kgem, &bo->base);
			if (bo->mem) {
				if (IS_CPU_MAP(bo->base.map))
					flags &= ~KGEM_BUFFER_INPLACE;
				goto init;
			} else {
				bo->base.refcnt = 0;
				kgem_bo_free(kgem, &bo->base);
			}
		}
	}
#else
	flags &= ~KGEM_BUFFER_INPLACE;
#endif
	/* Be more parsimonious with pwrite/pread/cacheable buffers */
	if ((flags & KGEM_BUFFER_INPLACE) == 0)
		alloc = NUM_PAGES(size);
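
	/* Snoopable (CPU-cacheable) bos let the CPU write through its own
	 * cache while the GPU snoops the bus, avoiding both GTT maps and
	 * clflushes on non-LLC hardware. (Summary comment added.)
	 */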
	if (use_snoopable_buffer(kgem, flags)) {
		bo = search_snoopable_buffer(kgem, alloc);
		if (bo) {
			if (flags & KGEM_BUFFER_WRITE)
				kgem_bo_sync__cpu(kgem, &bo->base);
			flags &= ~KGEM_BUFFER_INPLACE;
			goto init;
		}

		if ((flags & KGEM_BUFFER_INPLACE) == 0) {
			bo = create_snoopable_buffer(kgem, alloc);
			if (bo)
				goto init;
		}
	}

	flags &= ~KGEM_BUFFER_INPLACE;
	old = NULL;
	if ((flags & KGEM_BUFFER_WRITE) == 0)
		old = search_linear_cache(kgem, alloc, 0);
	if (old == NULL)
		old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
	if (old) {
		DBG(("%s: reusing ordinary handle %d for io\n",
		     __FUNCTION__, old->handle));
		bo = buffer_alloc_with_data(num_pages(old));
		if (bo == NULL)
			return NULL;

		init_buffer_from_bo(bo, old);
		bo->need_io = flags & KGEM_BUFFER_WRITE;
	} else {
		unsigned hint;

		if (use_snoopable_buffer(kgem, flags)) {
			bo = create_snoopable_buffer(kgem, alloc);
			if (bo)
				goto init;
		}

		bo = buffer_alloc();
		if (bo == NULL)
			return NULL;

		hint = CREATE_INACTIVE;
		if (flags & KGEM_BUFFER_WRITE)
			hint |= CREATE_CPU_MAP;
		old = search_linear_cache(kgem, alloc, hint);
		if (old) {
			DBG(("%s: reusing handle=%d for buffer\n",
			     __FUNCTION__, old->handle));

			init_buffer_from_bo(bo, old);
		} else {
			uint32_t handle = gem_create(kgem->fd, alloc);
			if (handle == 0) {
				free(bo);
				return NULL;
			}

			DBG(("%s: created handle=%d for buffer\n",
			     __FUNCTION__, handle));

			__kgem_bo_init(&bo->base, handle, alloc);
			debug_alloc(kgem, alloc * PAGE_SIZE);
		}

		assert(bo->mmapped);
		assert(!bo->need_io);
		assert(bo->base.refcnt == 1);

		if (flags & KGEM_BUFFER_WRITE) {
			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
			if (bo->mem != NULL) {
				kgem_bo_sync__cpu(kgem, &bo->base);
				goto init;
			}
		}

		DBG(("%s: falling back to new pwrite buffer\n", __FUNCTION__));
		old = &bo->base;
		bo = buffer_alloc_with_data(num_pages(old));
		if (bo == NULL) {
			old->refcnt = 0;
			kgem_bo_free(kgem, old);
			return NULL;
		}

		init_buffer_from_bo(bo, old);

		assert(bo->mem);
		assert(!bo->mmapped);
		assert(bo->base.refcnt == 1);

		bo->need_io = flags & KGEM_BUFFER_WRITE;
	}
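
	/* Common finalisation: mark the bo as an io buffer, record how many
	 * bytes are in use, and hand back a proxy pointing into the staging
	 * allocation. (Summary comment added.)
	 */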
init:
	bo->base.io = true;
	assert(bo->base.refcnt == 1);
	assert(num_pages(&bo->base) >= NUM_PAGES(size));
	assert(!bo->need_io || !bo->base.needs_flush);
	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
	assert(bo->mem);
	assert(!bo->mmapped || bo->base.map != NULL);

	bo->used = size;
	bo->write = flags & KGEM_BUFFER_WRITE_INPLACE;
	offset = 0;

	assert(list_is_empty(&bo->base.list));
	list_add(&bo->base.list, &kgem->batch_buffers);

	DBG(("%s(pages=%d [%d]) new handle=%d, used=%d, write=%d\n",
	     __FUNCTION__, num_pages(&bo->base), alloc, bo->base.handle, bo->used, bo->write));

done:
	bo->used = ALIGN(bo->used, UPLOAD_ALIGNMENT);
	assert(bo->mem);
	*ret = (char *)bo->mem + offset;
	return kgem_create_proxy(kgem, &bo->base, offset, size);
}
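
/* Report whether the upload buffer backing this proxy was created for
 * in-place (mmapped) writes rather than a staged pwrite; the flags were
 * recorded in bo->write at creation. (Summary comment added.)
 */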
bool kgem_buffer_is_inplace(struct kgem_bo *_bo)
{
	struct kgem_buffer *bo = (struct kgem_buffer *)_bo->proxy;
	return bo->write & KGEM_BUFFER_WRITE_INPLACE;
}
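
/* Allocate a 2D staging buffer; width and height are padded so the final
 * pair of rows remains addressable, and *ret receives the CPU-visible
 * mapping. A minimal usage sketch with hypothetical values, mirroring
 * kgem_upload_source_image() below:
 *
 *	void *ptr;
 *	struct kgem_bo *bo = kgem_create_buffer_2d(kgem, 64, 64, 32,
 *						   KGEM_BUFFER_WRITE_INPLACE,
 *						   &ptr);
 *	if (bo != NULL) {
 *		... write pixels through ptr, one row every bo->pitch bytes ...
 *	}
 */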
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret)
{
	struct kgem_bo *bo;
	int stride;

	assert(width > 0 && height > 0);
	assert(ret != NULL);
	stride = ALIGN(width, 2) * bpp >> 3;
	stride = ALIGN(stride, 4);

	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
	     __FUNCTION__, width, height, bpp, stride));

	bo = kgem_create_buffer(kgem, stride * ALIGN(height, 2), flags, ret);
	if (bo == NULL) {
		DBG(("%s: allocation failure for upload buffer\n",
		     __FUNCTION__));
		return NULL;
	}
	assert(*ret != NULL);
	assert(bo->proxy != NULL);

	if (height & 1) {
		struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;
		int min;

		assert(io->used);

		/* Having padded this surface to ensure that accesses to
		 * the last pair of rows are valid, remove the padding so
		 * that it can be allocated to other pixmaps.
		 */
		min = bo->delta + height * stride;
		min = ALIGN(min, UPLOAD_ALIGNMENT);
		if (io->used != min) {
			DBG(("%s: trimming buffer from %d to %d\n",
			     __FUNCTION__, io->used, min));
			io->used = min;
		}
		bo->size.bytes -= stride;
	}

	bo->map = MAKE_CPU_MAP(*ret);
	bo->pitch = stride;
	bo->unique_id = kgem_get_unique_id(kgem);
	return bo;
}
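
/* Copy a boxed region of a client image into a fresh upload buffer and
 * return the bo ready for use as a render/blit source. (Summary comment
 * added.)
 */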
struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
					 const void *data,
					 const BoxRec *box,
					 int stride, int bpp)
{
	int width = box->x2 - box->x1;
	int height = box->y2 - box->y1;
	struct kgem_bo *bo;
	void *dst;

	if (!kgem_can_create_2d(kgem, width, height, bpp))
		return NULL;

	DBG(("%s: (%d, %d), (%d, %d), stride=%d, bpp=%d\n",
	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2, stride, bpp));

	assert(data);
	assert(width > 0);
	assert(height > 0);
	assert(stride);
	assert(bpp);

	bo = kgem_create_buffer_2d(kgem,
				   width, height, bpp,
				   KGEM_BUFFER_WRITE_INPLACE, &dst);
	if (bo)
		memcpy_blt(data, dst, bpp,
			   stride, bo->pitch,
			   box->x1, box->y1,
			   0, 0,
			   width, height);

	return bo;
}
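
/* Attach a proxy bo to a caller-owned pointer slot: the slot's address is
 * stashed in bo->map so the reference can be revoked later, and the caller
 * gains a reference. (Summary comment added.)
 */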
void kgem_proxy_bo_attach(struct kgem_bo *bo,
			  struct kgem_bo **ptr)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
	assert(bo->map == NULL || IS_CPU_MAP(bo->map));
	assert(bo->proxy);
	list_add(&bo->vma, &bo->proxy->vma);
	bo->map = ptr;
	*ptr = kgem_bo_reference(bo);
}
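
/* Make the results of a GPU read-back visible to the CPU: for mmapped
 * buffers move the bo into the matching CPU/GTT read domain, otherwise
 * pread the range back into the shadow allocation. The caller must have
 * already submitted the batch. (Summary comment added.)
 */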
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
{
	struct kgem_buffer *bo;
	uint32_t offset = _bo->delta, length = _bo->size.bytes;

	/* We expect the caller to have already submitted the batch */
	assert(_bo->io);
	assert(_bo->exec == NULL);
	assert(_bo->rq == NULL);
	assert(_bo->proxy);

	_bo = _bo->proxy;
	assert(_bo->proxy == NULL);
	assert(_bo->exec == NULL);

	bo = (struct kgem_buffer *)_bo;

	DBG(("%s(offset=%d, length=%d, snooped=%d)\n", __FUNCTION__,
	     offset, length, bo->base.snoop));

	if (bo->mmapped) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n",
		     __FUNCTION__,
		     bo->base.needs_flush,
		     bo->base.domain,
		     __kgem_busy(kgem, bo->base.handle)));

		assert(!IS_CPU_MAP(bo->base.map) || bo->base.snoop || kgem->has_llc);

		VG_CLEAR(set_domain);
		set_domain.handle = bo->base.handle;
		set_domain.write_domain = 0;
		set_domain.read_domains =
			IS_CPU_MAP(bo->base.map) ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;

		if (drmIoctl(kgem->fd,
			     DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain))
			return;
	} else {
		if (gem_read(kgem->fd,
			     bo->base.handle, (char *)bo->mem + offset,
			     offset, length))
			return;
	}
	kgem_bo_retire(kgem, &bo->base);
	bo->base.domain = DOMAIN_NONE;
}
#endif
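
/* Per-bo cache of surface-state binding offsets keyed by render format;
 * a zero return means no binding has been recorded for that format yet.
 * (Summary comment added.)
 */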
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b && b->offset; b = b->next)
		if (format == b->format)
			return b->offset;

	return 0;
}
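
/* Record a binding offset for the given format: fill the first free slot,
 * clearing the slot that follows so stale entries are not returned by
 * lookups, or extend the list with a freshly allocated node. (Summary
 * comment added.)
 */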
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b; b = b->next) {
		if (b->offset)
			continue;

		b->offset = offset;
		b->format = format;

		if (b->next)
			b->next->offset = 0;

		return;
	}

	b = malloc(sizeof(*b));
	if (b) {
		b->next = bo->binding.next;
		b->format = format;
		b->offset = offset;
		bo->binding.next = b;
	}
}
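
/* Port-specific (not part of upstream SNA, judging by the SRV_FBINFO
 * service call): query the system framebuffer and wrap it in a bo; the
 * handle -2 appears to be a sentinel for the scanout buffer. Returns 1 on
 * success, 0 on failure.
 */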
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb)
{
	struct kgem_bo *bo;
	size_t size;
	int ret;

	ret = drmIoctl(kgem->fd, SRV_FBINFO, fb);
	if (ret != 0)
		return 0;

	size = fb->pitch * fb->height / PAGE_SIZE;

	bo = __kgem_bo_alloc(-2, size);
	if (!bo)
		return 0;

	bo->domain = DOMAIN_GTT;
	bo->unique_id = kgem_get_unique_id(kgem);
	bo->pitch = fb->pitch;
	bo->tiling = I915_TILING_X;
	bo->scanout = 1;
	fb->fb_bo = bo;

//	printf("fb width %d height %d pitch %d bo %p\n",
//	       fb->width, fb->height, fb->pitch, fb->fb_bo);

	return 1;
}
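
/* Re-query the framebuffer after a possible mode change, preserving the
 * cached bo; returns 1 if the size or pitch changed so callers can react,
 * 0 otherwise. (Summary comment added.)
 */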
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb)
{
	struct kgem_bo *bo;
	size_t size;
	int ret;

	bo = fb->fb_bo;

	ret = drmIoctl(kgem->fd, SRV_FBINFO, fb);
	if (ret != 0)
		return 0;

	fb->fb_bo = bo;

	size = fb->pitch * fb->height / PAGE_SIZE;

	if (size != bo->size.pages.count || fb->pitch != bo->pitch) {
		bo->size.pages.count = size;
		bo->pitch = fb->pitch;

		printf("fb width %d height %d pitch %d bo %p\n",
		       fb->width, fb->height, fb->pitch, fb->fb_bo);

		return 1;
	}

	return 0;
}
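
/* Drop the caller's reference and then release the backing storage
 * outright; this appears intended to free the bo immediately rather than
 * letting kgem_bo_destroy() retire it into the cache. (Summary comment
 * added; interpretation, not from the original source.)
 */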
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_destroy(kgem, bo);
	kgem_bo_free(kgem, bo);
}
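
/* Release every bo held on the per-ring pinned-batch lists. (Summary
 * comment added.)
 */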
void kgem_close_batches(struct kgem *kgem)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
		while (!list_is_empty(&kgem->pinned_batches[n])) {
			kgem_bo_destroy(kgem,
					list_first_entry(&kgem->pinned_batches[n],
							 struct kgem_bo, list));
		}
	}
}
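
/* Wrap an externally created GEM handle (presumably a shared scanout or
 * video surface) in a kgem_bo, assuming an X-tiled layout with the given
 * pitch; the tiling/domain values mirror kgem_init_fb() above. (Summary
 * comment added.)
 */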
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
				    int pitch, int height)
{
	struct kgem_bo *bo;
	int size;

	size = pitch * height / PAGE_SIZE;

	bo = __kgem_bo_alloc(handle, size);
	if (bo == NULL)
		return NULL;

	bo->domain = DOMAIN_GTT;
	bo->unique_id = kgem_get_unique_id(kgem);
	bo->pitch = pitch;
	bo->tiling = I915_TILING_X;
	bo->scanout = 0;

	return bo;
}