/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "sna.h"
#include "sna_reg.h"

unsigned int cpu_cache_size(void);

static struct kgem_bo *
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

static struct kgem_bo *
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

#define DBG_NO_HW 0
#define DBG_NO_TILING 1
#define DBG_NO_CACHE 0
#define DBG_NO_CACHE_LEVEL 0
#define DBG_NO_CPU 0
#define DBG_NO_USERPTR 0
#define DBG_NO_LLC 0
#define DBG_NO_SEMAPHORES 0
#define DBG_NO_MADV 1
#define DBG_NO_UPLOAD_CACHE 0
#define DBG_NO_UPLOAD_ACTIVE 0
#define DBG_NO_MAP_UPLOAD 0
#define DBG_NO_RELAXED_FENCING 0
#define DBG_NO_SECURE_BATCHES 0
#define DBG_NO_PINNED_BATCHES 0
#define DBG_NO_FAST_RELOC 0
#define DBG_NO_HANDLE_LUT 0
#define DBG_DUMP 0

#ifndef DEBUG_SYNC
#define DEBUG_SYNC 0
#endif

#define SHOW_BATCH 0

#if 0
#define ASSERT_IDLE(kgem__, handle__) assert(!__kgem_busy(kgem__, handle__))
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__) assert(!(expect__) || !__kgem_busy(kgem__, handle__))
#else
#define ASSERT_IDLE(kgem__, handle__)
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__)
#endif

/* The worst case seems to be 965gm, where we cannot write within a cacheline
 * that is simultaneously being read by the GPU, or within the sampler
 * prefetch. In general, the chipsets seem to have a requirement that sampler
 * offsets be aligned to a cacheline (64 bytes).
 */
#define UPLOAD_ALIGNMENT 128

#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)

#define MAX_GTT_VMA_CACHE 512
#define MAX_CPU_VMA_CACHE INT16_MAX
#define MAP_PRESERVE_TIME 10

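/* A bo's map pointer doubles as a type tag: mappings are at least page
 * aligned, so the two low bits are free to encode how the pointer was
 * obtained. Bit 0 marks a CPU mmap, bits 0|1 a userptr mapping, and an
 * untagged value is a GTT mmap; MAP() masks the tag off again. For example,
 * MAKE_CPU_MAP(0x1000) yields 0x1001, and MAP(0x1001) recovers 0x1000.
 */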
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)

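/* A request pointer is tagged the same way: kgem_request is at least word
 * aligned, so bit 0 carries the ring the request was queued on, and RQ()
 * (used further below) recovers the untagged request pointer.
 */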
#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))

#define LOCAL_I915_PARAM_HAS_BLT 11
#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING 12
#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA 15
#define LOCAL_I915_PARAM_HAS_SEMAPHORES 20
#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES 23
#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES 24
#define LOCAL_I915_PARAM_HAS_NO_RELOC 25
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT 26

#define LOCAL_I915_EXEC_IS_PINNED (1<<10)
#define LOCAL_I915_EXEC_NO_RELOC (1<<11)
#define LOCAL_I915_EXEC_HANDLE_LUT (1<<12)
#define UNCACHED 0
#define SNOOPED 1

struct local_i915_gem_cacheing {
    uint32_t handle;
    uint32_t cacheing;
};

#define LOCAL_IOCTL_I915_GEM_SET_CACHEING SRV_I915_GEM_SET_CACHEING

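/* An upload buffer: a chunk of GEM memory that many small client uploads
 * are packed into. "mem" is the CPU-visible staging copy, "used" the
 * high-water mark of bytes handed out; client bos are returned as proxies
 * of "base". need_io marks buffers whose contents must still be written
 * into the GEM object at flush time, "write" tracks the access mode, and
 * "mmapped" records whether mem is a kernel mapping rather than a shadow
 * allocation.
 */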
struct kgem_buffer {
    struct kgem_bo base;
    void *mem;
    uint32_t used;
    uint32_t need_io : 1;
    uint32_t write : 2;
    uint32_t mmapped : 1;
};

static struct kgem_bo *__kgem_freed_bo;
static struct kgem_request *__kgem_freed_request;
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;

static inline int bytes(struct kgem_bo *bo)
{
    return __kgem_bo_size(bo);
}

#define bucket(B) (B)->size.pages.bucket
#define num_pages(B) (B)->size.pages.count

#ifdef DEBUG_MEMORY
static void debug_alloc(struct kgem *kgem, size_t size)
{
    kgem->debug_memory.bo_allocs++;
    kgem->debug_memory.bo_bytes += size;
}
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
{
    debug_alloc(kgem, bytes(bo));
}
#else
#define debug_alloc(k, b)
#define debug_alloc__bo(k, b)
#endif

static void kgem_sna_reset(struct kgem *kgem)
{
    struct sna *sna = container_of(kgem, struct sna, kgem);

    sna->render.reset(sna);
    sna->blt_state.fill_bo = 0;
}

static void kgem_sna_flush(struct kgem *kgem)
{
    struct sna *sna = container_of(kgem, struct sna, kgem);

    sna->render.flush(sna);

//    if (sna->render.solid_cache.dirty)
//        sna_render_flush_solid(sna);
}

static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
{
    struct drm_i915_gem_set_tiling set_tiling;
    int ret = -1; /* the ioctl below is stubbed out; fail unless it runs */

    if (DBG_NO_TILING)
        return false;
/*
    VG_CLEAR(set_tiling);
    do {
        set_tiling.handle = handle;
        set_tiling.tiling_mode = tiling;
        set_tiling.stride = stride;

        ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
*/
    return ret == 0;
}

static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
{
    struct local_i915_gem_cacheing arg;

    VG_CLEAR(arg);
    arg.handle = handle;
    arg.cacheing = cacheing;
    return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
}

static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
{
    if (flags & CREATE_NO_RETIRE) {
        DBG(("%s: not retiring per-request\n", __FUNCTION__));
        return false;
    }

    if (!kgem->need_retire) {
        DBG(("%s: nothing to retire\n", __FUNCTION__));
        return false;
    }

    if (kgem_retire(kgem))
        return true;

    if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
        DBG(("%s: not throttling\n", __FUNCTION__));
        return false;
    }

    kgem_throttle(kgem);
    return kgem_retire(kgem);
}

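/* Note the escalation order above: first try to reap already-completed
 * requests cheaply; only if that frees nothing (and the caller permits)
 * block in the kernel via kgem_throttle() and retire again. The allocation
 * paths below use this as their slow path before giving up on the caches.
 */
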
static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
    struct drm_i915_gem_mmap_gtt mmap_arg;
    void *ptr = NULL; /* the mmap itself is stubbed out on this port */

    DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
         bo->handle, bytes(bo)));
    assert(bo->proxy == NULL);

retry_gtt:
    VG_CLEAR(mmap_arg);
    mmap_arg.handle = bo->handle;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
        printf("%s: failed to retrieve GTT offset for handle=%d: %d\n",
               __FUNCTION__, bo->handle, 0);
        (void)__kgem_throttle_retire(kgem, 0);
        if (kgem_expire_cache(kgem))
            goto retry_gtt;

        if (kgem->need_expire) {
            kgem_cleanup_cache(kgem);
            goto retry_gtt;
        }

        return NULL;
    }

retry_mmap:
//    ptr = mmap(0, bytes(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
//               kgem->fd, mmap_arg.offset);
    if (ptr == 0) {
        printf("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
               __FUNCTION__, bo->handle, bytes(bo), 0);
        if (__kgem_throttle_retire(kgem, 0))
            goto retry_mmap;

        if (kgem->need_expire) {
            kgem_cleanup_cache(kgem);
            goto retry_mmap;
        }

        ptr = NULL;
    }

    return ptr;
}

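/* pwrite upload helpers: __gem_write issues the raw PWRITE ioctl as given,
 * while gem_write below first widens the transfer to whole cachelines.
 */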
static int __gem_write(int fd, uint32_t handle,
                       int offset, int length,
                       const void *src)
{
    struct drm_i915_gem_pwrite pwrite;

    DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
         handle, offset, length));

    VG_CLEAR(pwrite);
    pwrite.handle = handle;
    pwrite.offset = offset;
    pwrite.size = length;
    pwrite.data_ptr = (uintptr_t)src;
    return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}

static int gem_write(int fd, uint32_t handle,
                     int offset, int length,
                     const void *src)
{
    struct drm_i915_gem_pwrite pwrite;

    DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
         handle, offset, length));

    VG_CLEAR(pwrite);
    pwrite.handle = handle;
    /* align the transfer to cachelines; fortuitously this is safe! */
    if ((offset | length) & 63) {
        pwrite.offset = offset & ~63;
        pwrite.size = ALIGN(offset+length, 64) - pwrite.offset;
        pwrite.data_ptr = (uintptr_t)src + pwrite.offset - offset;
    } else {
        pwrite.offset = offset;
        pwrite.size = length;
        pwrite.data_ptr = (uintptr_t)src;
    }
    return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}

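/* A worked example of the widening above: a write of 100 bytes at offset 10
 * becomes a 128-byte write at offset 0, with data_ptr rewound by the same
 * 10 bytes. The head and tail therefore read slightly outside the caller's
 * buffer; the "fortuitously safe" claim presumably relies on callers always
 * passing pointers into allocations with enough slack that the overread
 * cannot fault.
 */
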
bool __kgem_busy(struct kgem *kgem, int handle)
{
    struct drm_i915_gem_busy busy;

    VG_CLEAR(busy);
    busy.handle = handle;
    busy.busy = !kgem->wedged;
    (void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
    DBG(("%s: handle=%d, busy=%d, wedged=%d\n",
         __FUNCTION__, handle, busy.busy, kgem->wedged));

    return busy.busy;
}

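/* Note that busy.busy above is pre-seeded with !kgem->wedged and the ioctl
 * result is deliberately ignored: if the BUSY ioctl does not fill it in, a
 * healthy device defaults to "busy" while a wedged one reports "idle", so
 * retirement can proceed instead of waiting on hardware that will never
 * signal completion.
 */
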
static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
{
    DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
         __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
         __kgem_busy(kgem, bo->handle)));
    assert(bo->exec == NULL);
    assert(list_is_empty(&bo->vma));

    if (bo->rq) {
        if (!__kgem_busy(kgem, bo->handle)) {
            __kgem_bo_clear_busy(bo);
            kgem_retire(kgem);
        }
    } else {
        assert(!bo->needs_flush);
        ASSERT_IDLE(kgem, bo->handle);
    }
}

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
                   const void *data, int length)
{
    assert(bo->refcnt);
    assert(!bo->purged);
    assert(bo->proxy == NULL);
    ASSERT_IDLE(kgem, bo->handle);

    assert(length <= bytes(bo));
    if (gem_write(kgem->fd, bo->handle, 0, length, data))
        return false;

    DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
    if (bo->exec == NULL) {
        kgem_bo_retire(kgem, bo);
        bo->domain = DOMAIN_NONE;
    }
    return true;
}

static uint32_t gem_create(int fd, int num_pages)
{
    struct drm_i915_gem_create create;

    VG_CLEAR(create);
    create.handle = 0;
    create.size = PAGE_SIZE * num_pages;
    (void)drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

    return create.handle;
}

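/* Purgeable bo protocol: while a buffer sits idle in a cache we tell the
 * kernel it is I915_MADV_DONTNEED, so the shrinker may reclaim its pages
 * under memory pressure. Before reuse we flip it back to WILLNEED; if
 * madv.retained comes back false the pages are already gone and the bo
 * must be freed rather than recycled.
 */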
static bool
kgem_bo_set_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
    return true;
#else
    struct drm_i915_gem_madvise madv;

    assert(bo->exec == NULL);
    assert(!bo->purged);

    VG_CLEAR(madv);
    madv.handle = bo->handle;
    madv.madv = I915_MADV_DONTNEED;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
        bo->purged = 1;
        kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
        return madv.retained;
    }

    return true;
#endif
}

static bool
kgem_bo_is_retained(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
    return true;
#else
    struct drm_i915_gem_madvise madv;

    if (!bo->purged)
        return true;

    VG_CLEAR(madv);
    madv.handle = bo->handle;
    madv.madv = I915_MADV_DONTNEED;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0)
        return madv.retained;

    return false;
#endif
}

static bool
kgem_bo_clear_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
    return true;
#else
    struct drm_i915_gem_madvise madv;

    assert(bo->purged);

    VG_CLEAR(madv);
    madv.handle = bo->handle;
    madv.madv = I915_MADV_WILLNEED;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
        bo->purged = !madv.retained;
        kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
        return madv.retained;
    }

    return false;
#endif
}

static void gem_close(int fd, uint32_t handle)
{
    struct drm_gem_close close;

    VG_CLEAR(close);
    close.handle = handle;
    (void)drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
}

constant inline static unsigned long __fls(unsigned long word)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
    asm("bsr %1,%0"
        : "=r" (word)
        : "rm" (word));
    return word;
#else
    unsigned int v = 0;

    while (word >>= 1)
        v++;

    return v;
#endif
}

constant inline static int cache_bucket(int num_pages)
{
    return __fls(num_pages);
}

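/* Buckets are indexed by the floor of log2 of the size in pages, i.e. a
 * power-of-two size class: cache_bucket(1) == 0, cache_bucket(5) == 2,
 * cache_bucket(8) == 3. __fls is "find last set", using the x86 bsr
 * instruction when available and a shift loop otherwise.
 */
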
static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
                                      int handle, int num_pages)
{
    assert(num_pages);
    memset(bo, 0, sizeof(*bo));

    bo->refcnt = 1;
    bo->handle = handle;
    bo->target_handle = -1;
    num_pages(bo) = num_pages;
    bucket(bo) = cache_bucket(num_pages);
    bo->reusable = true;
    bo->domain = DOMAIN_CPU;
    list_init(&bo->request);
    list_init(&bo->list);
    list_init(&bo->vma);

    return bo;
}

static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
{
    struct kgem_bo *bo;

    if (__kgem_freed_bo) {
        bo = __kgem_freed_bo;
        __kgem_freed_bo = *(struct kgem_bo **)bo;
    } else {
        bo = malloc(sizeof(*bo));
        if (bo == NULL)
            return NULL;
    }

    return __kgem_bo_init(bo, handle, num_pages);
}

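/* Both the bo allocator above and the request allocator below keep a free
 * list of recycled structs, threaded through the dead objects themselves:
 * the first word of a freed kgem_bo (or kgem_request) is reinterpreted as
 * the pointer to the next free entry, so no separate list node is needed.
 */
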
static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
{
    struct kgem_request *rq;

    rq = __kgem_freed_request;
    if (rq) {
        __kgem_freed_request = *(struct kgem_request **)rq;
    } else {
        rq = malloc(sizeof(*rq));
        if (rq == NULL)
            rq = &kgem->static_request;
    }

    list_init(&rq->buffers);
    rq->bo = NULL;
    rq->ring = 0;

    return rq;
}

static void __kgem_request_free(struct kgem_request *rq)
{
    _list_del(&rq->list);
    *(struct kgem_request **)rq = __kgem_freed_request;
    __kgem_freed_request = rq;
}

static struct list *inactive(struct kgem *kgem, int num_pages)
{
    assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
    assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
    return &kgem->inactive[cache_bucket(num_pages)];
}

static struct list *active(struct kgem *kgem, int num_pages, int tiling)
{
    assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
    assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
    return &kgem->active[cache_bucket(num_pages)][tiling];
}

static size_t
agp_aperture_size(struct pci_device *dev, unsigned gen)
{
    /* XXX assume that only future chipsets are unknown and follow
     * the post gen2 PCI layout.
     */
//    return dev->regions[gen < 030 ? 0 : 2].size;

    return 0;
}

static size_t
total_ram_size(void)
{
    uint32_t data[9];
    size_t size = 0;

    asm volatile("int $0x40"
        : "=a" (size)
        : "a" (18), "b" (20), "c" (data)
        : "memory");

    return size != -1 ? size : 0;
}

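/* total_ram_size() above is KolibriOS-specific: int $0x40 is the system
 * call gate, and (presumably) function 18, subfunction 20 fills "data"
 * with memory statistics while eax reports the RAM size, returning -1 on
 * failure — hence the final check.
 */
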
static int gem_param(struct kgem *kgem, int name)
{
    drm_i915_getparam_t gp;
    int v = -1; /* No param uses the sign bit, reserve it for errors */

    VG_CLEAR(gp);
    gp.param = name;
    gp.value = &v;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp))
        return -1;

    VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
    return v;
}

static bool test_has_execbuffer2(struct kgem *kgem)
{
    return true;
}

static bool test_has_no_reloc(struct kgem *kgem)
{
    if (DBG_NO_FAST_RELOC)
        return false;

    return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
}

static bool test_has_handle_lut(struct kgem *kgem)
{
    if (DBG_NO_HANDLE_LUT)
        return false;

    return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
}

static bool test_has_semaphores_enabled(struct kgem *kgem)
{
    FILE *file;
    bool detected = false;
    int ret;

    if (DBG_NO_SEMAPHORES)
        return false;

    ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
    if (ret != -1)
        return ret > 0;

    return detected;
}

static bool __kgem_throttle(struct kgem *kgem)
{
//    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
        return false;

//    return errno == EIO;
}

static bool is_hw_supported(struct kgem *kgem,
                            struct pci_device *dev)
{
    if (DBG_NO_HW)
        return false;

    if (!test_has_execbuffer2(kgem))
        return false;

    if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
        return kgem->has_blt;

    /* Although pre-855gm the GMCH is fubar, it works mostly. So
     * let the user decide through "NoAccel" whether or not to risk
     * hw acceleration.
     */

    if (kgem->gen == 060 && dev->revision < 8) {
        /* pre-production SNB with dysfunctional BLT */
        return false;
    }

    if (kgem->gen >= 060) /* Only if the kernel supports the BLT ring */
        return kgem->has_blt;

    return true;
}

static bool test_has_relaxed_fencing(struct kgem *kgem)
{
    if (kgem->gen < 040) {
        if (DBG_NO_RELAXED_FENCING)
            return false;

        return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0;
    } else
        return true;
}

static bool test_has_llc(struct kgem *kgem)
{
    int has_llc = -1;

    if (DBG_NO_LLC)
        return false;

#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
    has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
#endif
    if (has_llc == -1) {
        DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
        has_llc = kgem->gen >= 060;
    }

    return has_llc;
}

static bool test_has_cacheing(struct kgem *kgem)
{
    uint32_t handle;
    bool ret;

    if (DBG_NO_CACHE_LEVEL)
        return false;

    /* Incoherent blt and sampler hangs the GPU */
    if (kgem->gen == 040)
        return false;

    handle = gem_create(kgem->fd, 1);
    if (handle == 0)
        return false;

    ret = gem_set_cacheing(kgem->fd, handle, UNCACHED);
    gem_close(kgem->fd, handle);
    return ret;
}

static bool test_has_userptr(struct kgem *kgem)
{
#if defined(USE_USERPTR)
    uint32_t handle;
    void *ptr;

    if (DBG_NO_USERPTR)
        return false;

    /* Incoherent blt and sampler hangs the GPU */
    if (kgem->gen == 040)
        return false;

    ptr = malloc(PAGE_SIZE);
    handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
    gem_close(kgem->fd, handle);
    free(ptr);

    return handle != 0;
#else
    return false;
#endif
}

static bool test_has_secure_batches(struct kgem *kgem)
{
    if (DBG_NO_SECURE_BATCHES)
        return false;

    return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
}

static bool test_has_pinned_batches(struct kgem *kgem)
{
    if (DBG_NO_PINNED_BATCHES)
        return false;

    return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
}

static bool kgem_init_pinned_batches(struct kgem *kgem)
{
    int count[2] = { 4, 2 };
    int size[2] = { 1, 4 };
    int n, i;

    if (kgem->wedged)
        return true;

    for (n = 0; n < ARRAY_SIZE(count); n++) {
        for (i = 0; i < count[n]; i++) {
            struct drm_i915_gem_pin pin;
            struct kgem_bo *bo;

            VG_CLEAR(pin);

            pin.handle = gem_create(kgem->fd, size[n]);
            if (pin.handle == 0)
                goto err;

            DBG(("%s: new handle=%d, num_pages=%d\n",
                 __FUNCTION__, pin.handle, size[n]));

            bo = __kgem_bo_alloc(pin.handle, size[n]);
            if (bo == NULL) {
                gem_close(kgem->fd, pin.handle);
                goto err;
            }

            pin.alignment = 0;
            if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
                gem_close(kgem->fd, pin.handle);
                goto err;
            }
            bo->presumed_offset = pin.offset;
            debug_alloc__bo(kgem, bo);
            list_add(&bo->list, &kgem->pinned_batches[n]);
        }
    }

    return true;

err:
    for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
        while (!list_is_empty(&kgem->pinned_batches[n])) {
            kgem_bo_destroy(kgem,
                            list_first_entry(&kgem->pinned_batches[n],
                                             struct kgem_bo, list));
        }
    }

    /* For simplicity populate the lists with a single unpinned bo */
    for (n = 0; n < ARRAY_SIZE(count); n++) {
        struct kgem_bo *bo;
        uint32_t handle;

        handle = gem_create(kgem->fd, size[n]);
        if (handle == 0)
            break;

        bo = __kgem_bo_alloc(handle, size[n]);
        if (bo == NULL) {
            gem_close(kgem->fd, handle);
            break;
        }

        debug_alloc__bo(kgem, bo);
        list_add(&bo->list, &kgem->pinned_batches[n]);
    }
    return false;
}

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{
    struct drm_i915_gem_get_aperture aperture;
    size_t totalram;
    unsigned half_gpu_max;
    unsigned int i, j;

    DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));

    memset(kgem, 0, sizeof(*kgem));

    kgem->fd = fd;
    kgem->gen = gen;

    list_init(&kgem->requests[0]);
    list_init(&kgem->requests[1]);
    list_init(&kgem->batch_buffers);
    list_init(&kgem->active_buffers);
    list_init(&kgem->flushing);
    list_init(&kgem->large);
    list_init(&kgem->large_inactive);
    list_init(&kgem->snoop);
    list_init(&kgem->scanout);
    for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++)
        list_init(&kgem->pinned_batches[i]);
    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
        list_init(&kgem->inactive[i]);
    for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
        for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++)
            list_init(&kgem->active[i][j]);
    }
    for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) {
        for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
            list_init(&kgem->vma[i].inactive[j]);
    }
    kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
    kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;

    kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
    DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
         kgem->has_blt));

    kgem->has_relaxed_delta =
        gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
    DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
         kgem->has_relaxed_delta));

    kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
    DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
         kgem->has_relaxed_fencing));

    kgem->has_llc = test_has_llc(kgem);
    DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
         kgem->has_llc));

    kgem->has_cacheing = test_has_cacheing(kgem);
    DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
         kgem->has_cacheing));

    kgem->has_userptr = test_has_userptr(kgem);
    DBG(("%s: has userptr? %d\n", __FUNCTION__,
         kgem->has_userptr));

    kgem->has_no_reloc = test_has_no_reloc(kgem);
    DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
         kgem->has_no_reloc));

    kgem->has_handle_lut = test_has_handle_lut(kgem);
    DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
         kgem->has_handle_lut));

    kgem->has_semaphores = false;
    if (kgem->has_blt && test_has_semaphores_enabled(kgem))
        kgem->has_semaphores = true;
    DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
         kgem->has_semaphores));

    kgem->can_blt_cpu = gen >= 030;
    DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
         kgem->can_blt_cpu));

    kgem->has_secure_batches = test_has_secure_batches(kgem);
    DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
         kgem->has_secure_batches));

    kgem->has_pinned_batches = test_has_pinned_batches(kgem);
    DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
         kgem->has_pinned_batches));

    if (!is_hw_supported(kgem, dev)) {
        printf("Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
        kgem->wedged = 1;
    } else if (__kgem_throttle(kgem)) {
        printf("Detected a hung GPU, disabling acceleration.\n");
        kgem->wedged = 1;
    }

    kgem->batch_size = ARRAY_SIZE(kgem->batch);
    if (gen == 020 && !kgem->has_pinned_batches)
        /* Limited to what we can pin */
        kgem->batch_size = 4*1024;
    if (gen == 022)
        /* 865g cannot handle a batch spanning multiple pages */
        kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
    if ((gen >> 3) == 7)
        kgem->batch_size = 16*1024;
    if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
        kgem->batch_size = 4*1024;

    if (!kgem_init_pinned_batches(kgem) && gen == 020) {
        printf("Unable to reserve memory for GPU, disabling acceleration.\n");
        kgem->wedged = 1;
    }

    DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
         kgem->batch_size));

    kgem->min_alignment = 4;
    if (gen < 040)
        kgem->min_alignment = 64;

    kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
    DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
         kgem->half_cpu_cache_pages));

    kgem->next_request = __kgem_request_alloc(kgem);

    DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
         !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
         kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));

    VG_CLEAR(aperture);
    aperture.aper_size = 0;
    (void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
    if (aperture.aper_size == 0)
        aperture.aper_size = 64*1024*1024;

    DBG(("%s: aperture size %lld, available now %lld\n",
         __FUNCTION__,
         (long long)aperture.aper_size,
         (long long)aperture.aper_available_size));

    kgem->aperture_total = aperture.aper_size;
    kgem->aperture_high = aperture.aper_size * 3/4;
    kgem->aperture_low = aperture.aper_size * 1/3;
    if (gen < 033) {
        /* Severe alignment penalties */
        kgem->aperture_high /= 2;
        kgem->aperture_low /= 2;
    }
    DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
         kgem->aperture_low, kgem->aperture_low / (1024*1024),
         kgem->aperture_high, kgem->aperture_high / (1024*1024)));

    kgem->aperture_mappable = agp_aperture_size(dev, gen);
    if (kgem->aperture_mappable == 0 ||
        kgem->aperture_mappable > aperture.aper_size)
        kgem->aperture_mappable = aperture.aper_size;
    DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
         kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));

    kgem->buffer_size = 64 * 1024;
    while (kgem->buffer_size < kgem->aperture_mappable >> 10)
        kgem->buffer_size *= 2;
    if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
        kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
    DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
         kgem->buffer_size, kgem->buffer_size / 1024));

    kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
    kgem->max_gpu_size = kgem->max_object_size;
    if (!kgem->has_llc)
        kgem->max_gpu_size = MAX_CACHE_SIZE;

    totalram = total_ram_size();
    if (totalram == 0) {
        DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
             __FUNCTION__));
        totalram = kgem->aperture_total;
    }
    DBG(("%s: total ram=%u\n", __FUNCTION__, totalram));
    if (kgem->max_object_size > totalram / 2)
        kgem->max_object_size = totalram / 2;
    if (kgem->max_gpu_size > totalram / 4)
        kgem->max_gpu_size = totalram / 4;

    kgem->max_cpu_size = kgem->max_object_size;

    half_gpu_max = kgem->max_gpu_size / 2;
    kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
    if (kgem->max_copy_tile_size > half_gpu_max)
        kgem->max_copy_tile_size = half_gpu_max;

    if (kgem->has_llc)
        kgem->max_upload_tile_size = kgem->max_copy_tile_size;
    else
        kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
    if (kgem->max_upload_tile_size > half_gpu_max)
        kgem->max_upload_tile_size = half_gpu_max;

    kgem->large_object_size = MAX_CACHE_SIZE;
    if (kgem->large_object_size > kgem->max_gpu_size)
        kgem->large_object_size = kgem->max_gpu_size;

    if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
        if (kgem->large_object_size > kgem->max_cpu_size)
            kgem->large_object_size = kgem->max_cpu_size;
    } else
        kgem->max_cpu_size = 0;
    if (DBG_NO_CPU)
        kgem->max_cpu_size = 0;

    DBG(("%s: maximum object size=%d\n",
         __FUNCTION__, kgem->max_object_size));
    DBG(("%s: large object threshold=%d\n",
         __FUNCTION__, kgem->large_object_size));
    DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
         __FUNCTION__,
         kgem->max_gpu_size, kgem->max_cpu_size,
         kgem->max_upload_tile_size, kgem->max_copy_tile_size));

    /* Convert the aperture thresholds to pages */
    kgem->aperture_low /= PAGE_SIZE;
    kgem->aperture_high /= PAGE_SIZE;

    kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
    if ((int)kgem->fence_max < 0)
        kgem->fence_max = 5; /* minimum safe value for all hw */
    DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));

    kgem->batch_flags_base = 0;
    if (kgem->has_no_reloc)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
    if (kgem->has_handle_lut)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
    if (kgem->has_pinned_batches)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;
}

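/* Execbuffer bookkeeping for the current batch: every bo the batch
 * references gets a drm_i915_gem_exec_object2 slot. With HANDLE_LUT the
 * kernel indexes relocations by slot number rather than by GEM handle,
 * so target_handle is the slot index in that case.
 */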
static struct drm_i915_gem_exec_object2 *
kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
{
    struct drm_i915_gem_exec_object2 *exec;

    DBG(("%s: handle=%d, index=%d\n",
         __FUNCTION__, bo->handle, kgem->nexec));

    assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
    bo->target_handle = kgem->has_handle_lut ? kgem->nexec : bo->handle;
    exec = memset(&kgem->exec[kgem->nexec++], 0, sizeof(*exec));
    exec->handle = bo->handle;
    exec->offset = bo->presumed_offset;

    kgem->aperture += num_pages(bo);

    return exec;
}

static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
    bo->exec = kgem_add_handle(kgem, bo);
    bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);

    list_move_tail(&bo->request, &kgem->next_request->buffers);

    /* XXX is it worth working around gcc here? */
    kgem->flush |= bo->flush;
}

static uint32_t kgem_end_batch(struct kgem *kgem)
{
    kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
    if (kgem->nbatch & 1)
        kgem->batch[kgem->nbatch++] = MI_NOOP;

    return kgem->nbatch;
}

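/* Self-relocations are batch words that point back into the batch buffer
 * itself. While the batch is being built its own target_handle is not yet
 * known, so such relocations are recorded with target_handle == ~0U and
 * patched here at submission time; reloc__self[] caps the fast list at 256
 * entries, beyond which the remaining entries are found by scanning.
 */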
static void kgem_fixup_self_relocs(struct kgem *kgem, struct kgem_bo *bo)
{
    int n;

    if (kgem->nreloc__self == 0)
        return;

    for (n = 0; n < kgem->nreloc__self; n++) {
        int i = kgem->reloc__self[n];
        assert(kgem->reloc[i].target_handle == ~0U);
        kgem->reloc[i].target_handle = bo->target_handle;
        kgem->reloc[i].presumed_offset = bo->presumed_offset;
        kgem->batch[kgem->reloc[i].offset/sizeof(kgem->batch[0])] =
            kgem->reloc[i].delta + bo->presumed_offset;
    }

    if (n == 256) {
        for (n = kgem->reloc__self[255]; n < kgem->nreloc; n++) {
            if (kgem->reloc[n].target_handle == ~0U) {
                kgem->reloc[n].target_handle = bo->target_handle;
                kgem->reloc[n].presumed_offset = bo->presumed_offset;
                kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
                    kgem->reloc[n].delta + bo->presumed_offset;
            }
        }
    }
}

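/* Each bo keeps a small chain of cached binding-table entries (format plus
 * cached state offset) from previous use; walk and free the chain when the
 * bo dies.
 */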
static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo)
{
    struct kgem_bo_binding *b;

    b = bo->binding.next;
    while (b) {
        struct kgem_bo_binding *next = b->next;
        free(b);
        b = next;
    }
}

static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
{
    int type = IS_CPU_MAP(bo->map);

    assert(!IS_USER_MAP(bo->map));

    DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
         __FUNCTION__, type ? "CPU" : "GTT",
         bo->handle, kgem->vma[type].count));

    VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
//    munmap(MAP(bo->map), bytes(bo));
    bo->map = NULL;

    if (!list_is_empty(&bo->vma)) {
        list_del(&bo->vma);
        kgem->vma[type].count--;
    }
}

static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
{
    DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
    assert(bo->refcnt == 0);
    assert(bo->exec == NULL);
    assert(!bo->snoop || bo->rq == NULL);

#ifdef DEBUG_MEMORY
    kgem->debug_memory.bo_allocs--;
    kgem->debug_memory.bo_bytes -= bytes(bo);
#endif

    kgem_bo_binding_free(kgem, bo);

    if (IS_USER_MAP(bo->map)) {
        assert(bo->rq == NULL);
        assert(MAP(bo->map) != bo || bo->io);
        if (bo != MAP(bo->map)) {
            DBG(("%s: freeing snooped base\n", __FUNCTION__));
            free(MAP(bo->map));
        }
        bo->map = NULL;
    }
    if (bo->map)
        kgem_bo_release_map(kgem, bo);
    assert(list_is_empty(&bo->vma));

    _list_del(&bo->list);
    _list_del(&bo->request);
    gem_close(kgem->fd, bo->handle);

    if (!bo->io) {
        *(struct kgem_bo **)bo = __kgem_freed_bo;
        __kgem_freed_bo = bo;
    } else
        free(bo);
}

inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
                                            struct kgem_bo *bo)
{
    DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle));

    assert(bo->refcnt == 0);
    assert(bo->reusable);
    assert(bo->rq == NULL);
    assert(bo->exec == NULL);
    assert(bo->domain != DOMAIN_GPU);
    assert(!bo->proxy);
    assert(!bo->io);
    assert(!bo->scanout);
    assert(!bo->needs_flush);
    assert(list_is_empty(&bo->vma));
    ASSERT_IDLE(kgem, bo->handle);

    kgem->need_expire = true;

    if (bucket(bo) >= NUM_CACHE_BUCKETS) {
        list_move(&bo->list, &kgem->large_inactive);
        return;
    }

    assert(bo->flush == false);
    list_move(&bo->list, &kgem->inactive[bucket(bo)]);
    if (bo->map) {
        int type = IS_CPU_MAP(bo->map);
        if (bucket(bo) >= NUM_CACHE_BUCKETS ||
            (!type && !__kgem_bo_is_mappable(kgem, bo))) {
//            munmap(MAP(bo->map), bytes(bo));
            bo->map = NULL;
        }
        if (bo->map) {
            list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
            kgem->vma[type].count++;
        }
    }
}

static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
{
    struct kgem_bo *base;

    if (!bo->io)
        return bo;

    assert(!bo->snoop);
    base = malloc(sizeof(*base));
    if (base) {
        DBG(("%s: transferring io handle=%d to bo\n",
             __FUNCTION__, bo->handle));
        /* transfer the handle to a minimum bo */
        memcpy(base, bo, sizeof(*base));
        base->io = false;
        list_init(&base->list);
        list_replace(&bo->request, &base->request);
        list_replace(&bo->vma, &base->vma);
        free(bo);
        bo = base;
    } else
        bo->reusable = false;

    return bo;
}

inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
                                                struct kgem_bo *bo)
{
    DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));

    list_del(&bo->list);
    assert(bo->rq == NULL);
    assert(bo->exec == NULL);
    if (bo->map) {
        assert(!list_is_empty(&bo->vma));
        list_del(&bo->vma);
        kgem->vma[IS_CPU_MAP(bo->map)].count--;
    }
}

inline static void kgem_bo_remove_from_active(struct kgem *kgem,
                                              struct kgem_bo *bo)
{
    DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));

    list_del(&bo->list);
    assert(bo->rq != NULL);
    if (bo->rq == (void *)kgem)
        list_del(&bo->request);
    assert(list_is_empty(&bo->vma));
}

static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
{
    assert(bo->scanout);
    assert(!bo->refcnt);
    assert(bo->exec == NULL);
    assert(bo->proxy == NULL);

    DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
         __FUNCTION__, bo->handle, bo->delta, bo->reusable));
    if (bo->delta) {
        /* XXX will leak if we are not DRM_MASTER. *shrug* */
//        drmModeRmFB(kgem->fd, bo->delta);
        bo->delta = 0;
    }

    bo->scanout = false;
    bo->flush = false;
    bo->reusable = true;

    if (kgem->has_llc &&
        !gem_set_cacheing(kgem->fd, bo->handle, SNOOPED))
        bo->reusable = false;
}

static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
{
    struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;

    DBG(("%s: size=%d, offset=%d, parent used=%d\n",
         __FUNCTION__, bo->size.bytes, bo->delta, io->used));

    if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used)
        io->used = bo->delta;
}

static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
{
    assert(bo->refcnt == 0);
    assert(bo->scanout);
    assert(bo->delta);
    assert(!bo->snoop);
    assert(!bo->io);

    DBG(("%s: moving %d [fb %d] to scanout cache, active? %d\n",
         __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL));
    if (bo->rq)
        list_move_tail(&bo->list, &kgem->scanout);
    else
        list_move(&bo->list, &kgem->scanout);
}

static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
{
    assert(bo->refcnt == 0);
    assert(bo->exec == NULL);

    if (num_pages(bo) > kgem->max_cpu_size >> 13) {
        DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
             __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13));
        kgem_bo_free(kgem, bo);
        return;
    }

    assert(bo->tiling == I915_TILING_NONE);
    assert(bo->rq == NULL);

    DBG(("%s: moving %d to snoop cache\n", __FUNCTION__, bo->handle));
    list_add(&bo->list, &kgem->snoop);
}

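/* Look for a recyclable snooped bo: prefer the first entry that fits
 * without being wasteful (at most twice the requested size); failing that,
 * fall back to the smallest over-sized candidate seen ("first"). If the
 * cache is empty, try to retire completed work once before giving up.
 */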
static struct kgem_bo *
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
{
    struct kgem_bo *bo, *first = NULL;

    DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));

    if ((kgem->has_cacheing | kgem->has_userptr) == 0)
        return NULL;

    if (list_is_empty(&kgem->snoop)) {
        DBG(("%s: inactive and cache empty\n", __FUNCTION__));
        if (!__kgem_throttle_retire(kgem, flags)) {
            DBG(("%s: nothing retired\n", __FUNCTION__));
            return NULL;
        }
    }

    list_for_each_entry(bo, &kgem->snoop, list) {
        assert(bo->refcnt == 0);
        assert(bo->snoop);
        assert(!bo->scanout);
        assert(bo->proxy == NULL);
        assert(bo->tiling == I915_TILING_NONE);
        assert(bo->rq == NULL);
        assert(bo->exec == NULL);

        if (num_pages > num_pages(bo))
            continue;

        if (num_pages(bo) > 2*num_pages) {
            if (first == NULL)
                first = bo;
            continue;
        }

        list_del(&bo->list);
        bo->pitch = 0;
        bo->delta = 0;

        DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
             __FUNCTION__, bo->handle, num_pages(bo)));
        return bo;
    }

    if (first) {
        list_del(&first->list);
        first->pitch = 0;
        first->delta = 0;

        DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
             __FUNCTION__, first->handle, num_pages(first)));
        return first;
    }

    return NULL;
}

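/* The last reference is gone: decide where the bo goes next. Snooped and
 * scanout buffers return to their dedicated caches; otherwise a still-busy
 * bo joins the active lists keyed by size bucket and tiling, while an idle
 * one is marked purgeable and parked on the inactive lists. Anything that
 * cannot be safely recycled is closed outright.
 */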
static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
    DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));

    assert(list_is_empty(&bo->list));
    assert(bo->refcnt == 0);
    assert(!bo->purged);
    assert(bo->proxy == NULL);

    bo->binding.offset = 0;

    if (DBG_NO_CACHE)
        goto destroy;

    if (bo->snoop && !bo->flush) {
        DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
        assert(!bo->flush);
        assert(list_is_empty(&bo->list));
        if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle))
            __kgem_bo_clear_busy(bo);
        if (bo->rq == NULL) {
            assert(!bo->needs_flush);
            kgem_bo_move_to_snoop(kgem, bo);
        }
        return;
    }

    if (bo->scanout) {
        kgem_bo_move_to_scanout(kgem, bo);
        return;
    }

    if (bo->io)
        bo = kgem_bo_replace_io(bo);
    if (!bo->reusable) {
        DBG(("%s: handle=%d, not reusable\n",
             __FUNCTION__, bo->handle));
        goto destroy;
    }

    if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU)
        kgem_bo_release_map(kgem, bo);

    assert(list_is_empty(&bo->vma));
    assert(list_is_empty(&bo->list));
    assert(bo->snoop == false);
    assert(bo->io == false);
    assert(bo->scanout == false);

    if (bo->exec && kgem->nexec == 1) {
        DBG(("%s: only handle in batch, discarding last operations\n",
             __FUNCTION__));
        assert(bo->exec == &kgem->exec[0]);
        assert(kgem->exec[0].handle == bo->handle);
        assert(RQ(bo->rq) == kgem->next_request);
        bo->refcnt = 1;
        kgem_reset(kgem);
        bo->refcnt = 0;
    }

    if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
        __kgem_bo_clear_busy(bo);

    if (bo->rq) {
        struct list *cache;

        DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
        if (bucket(bo) < NUM_CACHE_BUCKETS)
            cache = &kgem->active[bucket(bo)][bo->tiling];
        else
            cache = &kgem->large;
        list_add(&bo->list, cache);
        return;
    }

    assert(bo->exec == NULL);
    assert(list_is_empty(&bo->request));

    if (!IS_CPU_MAP(bo->map)) {
        if (!kgem_bo_set_purgeable(kgem, bo))
            goto destroy;

        if (!kgem->has_llc && bo->domain == DOMAIN_CPU)
            goto destroy;

        DBG(("%s: handle=%d, purged\n",
             __FUNCTION__, bo->handle));
    }

    kgem_bo_move_to_inactive(kgem, bo);
    return;

destroy:
    if (!bo->exec)
        kgem_bo_free(kgem, bo);
}

static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
{
    assert(bo->refcnt);
    if (--bo->refcnt == 0)
        __kgem_bo_destroy(kgem, bo);
}

static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
{
    while (!list_is_empty(&bo->base.vma)) {
        struct kgem_bo *cached;

        cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
        assert(cached->proxy == &bo->base);
        list_del(&cached->vma);

        assert(*(struct kgem_bo **)cached->map == cached);
        *(struct kgem_bo **)cached->map = NULL;
        cached->map = NULL;

        kgem_bo_destroy(kgem, cached);
    }
}

static bool kgem_retire__buffers(struct kgem *kgem)
{
    bool retired = false;

    while (!list_is_empty(&kgem->active_buffers)) {
        struct kgem_buffer *bo =
            list_last_entry(&kgem->active_buffers,
                            struct kgem_buffer,
                            base.list);

        if (bo->base.rq)
            break;

        DBG(("%s: releasing upload cache for handle=%d? %d\n",
             __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
        list_del(&bo->base.list);
        kgem_buffer_release(kgem, bo);
        kgem_bo_unref(kgem, &bo->base);
        retired = true;
    }

    return retired;
}

static bool kgem_retire__flushing(struct kgem *kgem)
{
	struct kgem_bo *bo, *next;
	bool retired = false;

	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
		assert(bo->rq == (void *)kgem);
		assert(bo->exec == NULL);

		if (__kgem_busy(kgem, bo->handle))
			break;

		__kgem_bo_clear_busy(bo);

		if (bo->refcnt)
			continue;

		if (bo->snoop) {
			kgem_bo_move_to_snoop(kgem, bo);
		} else if (bo->scanout) {
			kgem_bo_move_to_scanout(kgem, bo);
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
			   kgem_bo_set_purgeable(kgem, bo)) {
			kgem_bo_move_to_inactive(kgem, bo);
			retired = true;
		} else
			kgem_bo_free(kgem, bo);
	}
#if HAS_DEBUG_FULL
	{
		int count = 0;
		list_for_each_entry(bo, &kgem->flushing, request)
			count++;
		printf("%s: %d bo on flushing list\n", __FUNCTION__, count);
	}
#endif

	kgem->need_retire |= !list_is_empty(&kgem->flushing);

	return retired;
}

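/* Retire a single completed request: detach each bo it referenced,
 * parking bos that still need a flush on kgem->flushing, drop the
 * request's own reference to its batch bo, and report whether anything
 * was returned to the inactive cache for reuse.
 */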
static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
{
	bool retired = false;

	DBG(("%s: request %d complete\n",
	     __FUNCTION__, rq->bo->handle));

	while (!list_is_empty(&rq->buffers)) {
		struct kgem_bo *bo;

		bo = list_first_entry(&rq->buffers,
				      struct kgem_bo,
				      request);

		assert(RQ(bo->rq) == rq);
		assert(bo->exec == NULL);
		assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);

		list_del(&bo->request);

		if (bo->needs_flush)
			bo->needs_flush = __kgem_busy(kgem, bo->handle);
		if (bo->needs_flush) {
			DBG(("%s: moving %d to flushing\n",
			     __FUNCTION__, bo->handle));
			list_add(&bo->request, &kgem->flushing);
			bo->rq = (void *)kgem;
			continue;
		}

		bo->domain = DOMAIN_NONE;
		bo->rq = NULL;
		if (bo->refcnt)
			continue;

		if (bo->snoop) {
			kgem_bo_move_to_snoop(kgem, bo);
		} else if (bo->scanout) {
			kgem_bo_move_to_scanout(kgem, bo);
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
			   kgem_bo_set_purgeable(kgem, bo)) {
			kgem_bo_move_to_inactive(kgem, bo);
			retired = true;
		} else {
			DBG(("%s: closing %d\n",
			     __FUNCTION__, bo->handle));
			kgem_bo_free(kgem, bo);
		}
	}

	assert(rq->bo->rq == NULL);
	assert(list_is_empty(&rq->bo->request));

	if (--rq->bo->refcnt == 0) {
		if (kgem_bo_set_purgeable(kgem, rq->bo)) {
			kgem_bo_move_to_inactive(kgem, rq->bo);
			retired = true;
		} else {
			DBG(("%s: closing %d\n",
			     __FUNCTION__, rq->bo->handle));
			kgem_bo_free(kgem, rq->bo);
		}
	}

	__kgem_request_free(rq);
	return retired;
}

static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
{
	bool retired = false;

	while (!list_is_empty(&kgem->requests[ring])) {
		struct kgem_request *rq;

		rq = list_first_entry(&kgem->requests[ring],
				      struct kgem_request,
				      list);
		if (__kgem_busy(kgem, rq->bo->handle))
			break;

		retired |= __kgem_retire_rq(kgem, rq);
	}

#if HAS_DEBUG_FULL
	{
		struct kgem_bo *bo;
		int count = 0;

		list_for_each_entry(bo, &kgem->requests[ring], request)
			count++;

		bo = NULL;
		if (!list_is_empty(&kgem->requests[ring]))
			bo = list_first_entry(&kgem->requests[ring],
					      struct kgem_request,
					      list)->bo;

		printf("%s: ring=%d, %d outstanding requests, oldest=%d\n",
		       __FUNCTION__, ring, count, bo ? bo->handle : 0);
	}
#endif

	return retired;
}

static bool kgem_retire__requests(struct kgem *kgem)
{
	bool retired = false;
	int n;

	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
		retired |= kgem_retire__requests_ring(kgem, n);
		kgem->need_retire |= !list_is_empty(&kgem->requests[n]);
	}

	return retired;
}

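/* Top-level retirement entry point: reap the flushing list, every
 * request ring and the upload-buffer list. Returns true if any bo
 * became available for reuse.
 */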
bool kgem_retire(struct kgem *kgem)
{
	bool retired = false;

	DBG(("%s\n", __FUNCTION__));

	kgem->need_retire = false;

	retired |= kgem_retire__flushing(kgem);
	retired |= kgem_retire__requests(kgem);
	retired |= kgem_retire__buffers(kgem);

	DBG(("%s -- retired=%d, need_retire=%d\n",
	     __FUNCTION__, retired, kgem->need_retire));

	kgem->retire(kgem);

	return retired;
}

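/* After submission, hand every bo of the current batch over to the new
 * request: record the offsets chosen by the kernel, move the bos to the
 * GPU domain and queue the request for later retirement. The
 * static_request fallback (used when we could not allocate a request)
 * is instead synced immediately and its buffers released on the spot.
 */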
static void kgem_commit(struct kgem *kgem)
{
	struct kgem_request *rq = kgem->next_request;
	struct kgem_bo *bo, *next;

	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
		assert(next->request.prev == &bo->request);

		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n",
		     __FUNCTION__, bo->handle, bo->proxy != NULL,
		     bo->dirty, bo->needs_flush, bo->snoop,
		     (unsigned)bo->exec->offset));

		assert(!bo->purged);
		assert(bo->exec);
		assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec);
		assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));

		bo->presumed_offset = bo->exec->offset;
		bo->exec = NULL;
		bo->target_handle = -1;

		if (!bo->refcnt && !bo->reusable) {
			assert(!bo->snoop);
			kgem_bo_free(kgem, bo);
			continue;
		}

		bo->binding.offset = 0;
		bo->domain = DOMAIN_GPU;
		bo->dirty = false;

		if (bo->proxy) {
			/* proxies are not used for domain tracking */
			bo->exec = NULL;
			__kgem_bo_clear_busy(bo);
		}

		kgem->scanout_busy |= bo->scanout;
	}

	if (rq == &kgem->static_request) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: syncing due to allocation failure\n", __FUNCTION__));

		VG_CLEAR(set_domain);
		set_domain.handle = rq->bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
			kgem_throttle(kgem);
		}

		kgem_retire(kgem);
		assert(list_is_empty(&rq->buffers));

		gem_close(kgem->fd, rq->bo->handle);
		kgem_cleanup_cache(kgem);
	} else {
		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
		kgem->need_throttle = kgem->need_retire = 1;
	}

	kgem->next_request = NULL;
}

static void kgem_close_list(struct kgem *kgem, struct list *head)
{
	while (!list_is_empty(head))
		kgem_bo_free(kgem, list_first_entry(head, struct kgem_bo, list));
}

static void kgem_close_inactive(struct kgem *kgem)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
		kgem_close_list(kgem, &kgem->inactive[i]);
}

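/* Finalise the upload buffers attached to the batch before submission.
 * Mmapped buffers with room to spare are kept on active_buffers for
 * further reuse; copy buffers (need_io) are written into their bo with
 * gem_write, after first trying to shrink them into a smaller bo from
 * the snoop or linear cache and repointing the affected relocations.
 */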
static void kgem_finish_buffers(struct kgem *kgem)
{
	struct kgem_buffer *bo, *next;

	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
		     bo->write, bo->mmapped));

		assert(next->base.list.prev == &bo->base.list);
		assert(bo->base.io);
		assert(bo->base.refcnt >= 1);

		if (!bo->base.exec) {
			DBG(("%s: skipping unattached handle=%d, used=%d\n",
			     __FUNCTION__, bo->base.handle, bo->used));
			continue;
		}

		if (!bo->write) {
			assert(bo->base.exec || bo->base.refcnt > 1);
			goto decouple;
		}

		if (bo->mmapped) {
			int used;

			assert(!bo->need_io);

			used = ALIGN(bo->used, PAGE_SIZE);
			if (!DBG_NO_UPLOAD_ACTIVE &&
			    used + PAGE_SIZE <= bytes(&bo->base) &&
			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) {
				DBG(("%s: retaining upload buffer (%d/%d)\n",
				     __FUNCTION__, bo->used, bytes(&bo->base)));
				bo->used = used;
				list_move(&bo->base.list,
					  &kgem->active_buffers);
				continue;
			}
			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
			     __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map)));
			goto decouple;
		}

		if (!bo->used) {
			/* Unless we replace the handle in the execbuffer,
			 * then this bo will become active. So decouple it
			 * from the buffer list and track it in the normal
			 * manner.
			 */
			goto decouple;
		}

		assert(bo->need_io);
		assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
		assert(bo->base.domain != DOMAIN_GPU);

		if (bo->base.refcnt == 1 &&
		    bo->base.size.pages.count > 1 &&
		    bo->used < bytes(&bo->base) / 2) {
			struct kgem_bo *shrink;
			unsigned alloc = NUM_PAGES(bo->used);

			shrink = search_snoop_cache(kgem, alloc,
						    CREATE_INACTIVE | CREATE_NO_RETIRE);
			if (shrink) {
				void *map;
				int n;

				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
				     __FUNCTION__,
				     bo->used, bytes(&bo->base), bytes(shrink),
				     bo->base.handle, shrink->handle));

				assert(bo->used <= bytes(shrink));
				map = kgem_bo_map__cpu(kgem, shrink);
				if (map) {
					kgem_bo_sync__cpu(kgem, shrink);
					memcpy(map, bo->mem, bo->used);

					shrink->target_handle =
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
					for (n = 0; n < kgem->nreloc; n++) {
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
							kgem->reloc[n].target_handle = shrink->target_handle;
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
								kgem->reloc[n].delta + shrink->presumed_offset;
						}
					}

					bo->base.exec->handle = shrink->handle;
					bo->base.exec->offset = shrink->presumed_offset;
					shrink->exec = bo->base.exec;
					shrink->rq = bo->base.rq;
					list_replace(&bo->base.request,
						     &shrink->request);
					list_init(&bo->base.request);
					shrink->needs_flush = bo->base.dirty;

					bo->base.exec = NULL;
					bo->base.rq = NULL;
					bo->base.dirty = false;
					bo->base.needs_flush = false;
					bo->used = 0;

					goto decouple;
				}

				__kgem_bo_destroy(kgem, shrink);
			}

			shrink = search_linear_cache(kgem, alloc,
						     CREATE_INACTIVE | CREATE_NO_RETIRE);
			if (shrink) {
				int n;

				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
				     __FUNCTION__,
				     bo->used, bytes(&bo->base), bytes(shrink),
				     bo->base.handle, shrink->handle));

				assert(bo->used <= bytes(shrink));
				if (gem_write(kgem->fd, shrink->handle,
					      0, bo->used, bo->mem) == 0) {
					shrink->target_handle =
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
					for (n = 0; n < kgem->nreloc; n++) {
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
							kgem->reloc[n].target_handle = shrink->target_handle;
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
								kgem->reloc[n].delta + shrink->presumed_offset;
						}
					}

					bo->base.exec->handle = shrink->handle;
					bo->base.exec->offset = shrink->presumed_offset;
					shrink->exec = bo->base.exec;
					shrink->rq = bo->base.rq;
					list_replace(&bo->base.request,
						     &shrink->request);
					list_init(&bo->base.request);
					shrink->needs_flush = bo->base.dirty;

					bo->base.exec = NULL;
					bo->base.rq = NULL;
					bo->base.dirty = false;
					bo->base.needs_flush = false;
					bo->used = 0;

					goto decouple;
				}

				__kgem_bo_destroy(kgem, shrink);
			}
		}

		DBG(("%s: handle=%d, uploading %d/%d\n",
		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
		ASSERT_IDLE(kgem, bo->base.handle);
		assert(bo->used <= bytes(&bo->base));
		gem_write(kgem->fd, bo->base.handle,
			  0, bo->used, bo->mem);
		bo->need_io = 0;

decouple:
		DBG(("%s: releasing handle=%d\n",
		     __FUNCTION__, bo->base.handle));
		list_del(&bo->base.list);
		kgem_bo_unref(kgem, &bo->base);
	}
}

static void kgem_cleanup(struct kgem *kgem)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
		while (!list_is_empty(&kgem->requests[n])) {
			struct kgem_request *rq;

			rq = list_first_entry(&kgem->requests[n],
					      struct kgem_request,
					      list);
			while (!list_is_empty(&rq->buffers)) {
				struct kgem_bo *bo;

				bo = list_first_entry(&rq->buffers,
						      struct kgem_bo,
						      request);

				bo->exec = NULL;
				bo->dirty = false;
				__kgem_bo_clear_busy(bo);
				if (bo->refcnt == 0)
					kgem_bo_free(kgem, bo);
			}

			__kgem_request_free(rq);
		}
	}

	kgem_close_inactive(kgem);
}

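/* Upload the batch (and any surface state written from the top of the
 * buffer downwards) into the target bo. Three layouts are handled:
 * commands only, commands and surface state sharing a page, and a
 * disjoint pair uploaded with two separate writes.
 */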
static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
{
	int ret;

	ASSERT_IDLE(kgem, handle);

	/* If there is no surface data, just upload the batch */
	if (kgem->surface == kgem->batch_size)
		return gem_write(kgem->fd, handle,
				 0, sizeof(uint32_t)*kgem->nbatch,
				 kgem->batch);

	/* Are the batch pages conjoint with the surface pages? */
	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
		return gem_write(kgem->fd, handle,
				 0, kgem->batch_size*sizeof(uint32_t),
				 kgem->batch);
	}

	/* Disjoint surface/batch, upload separately */
	ret = gem_write(kgem->fd, handle,
			0, sizeof(uint32_t)*kgem->nbatch,
			kgem->batch);
	if (ret)
		return ret;

	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
	ret -= sizeof(uint32_t) * kgem->surface;
	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
	return __gem_write(kgem->fd, handle,
			   size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
			   kgem->batch + kgem->surface);
}

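/* Abandon the batch under construction: release every bo attached to
 * the unsubmitted request (still-busy writes are parked on the flushing
 * list), clear the batch, reloc and exec state, and start a fresh
 * request.
 */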
void kgem_reset(struct kgem *kgem)
{
	if (kgem->next_request) {
		struct kgem_request *rq = kgem->next_request;

		while (!list_is_empty(&rq->buffers)) {
			struct kgem_bo *bo =
				list_first_entry(&rq->buffers,
						 struct kgem_bo,
						 request);
			list_del(&bo->request);

			assert(RQ(bo->rq) == rq);

			bo->binding.offset = 0;
			bo->exec = NULL;
			bo->target_handle = -1;
			bo->dirty = false;

			if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
				list_add(&bo->request, &kgem->flushing);
				bo->rq = (void *)kgem;
			} else
				__kgem_bo_clear_busy(bo);

			if (!bo->refcnt && !bo->reusable) {
				assert(!bo->snoop);
				DBG(("%s: discarding handle=%d\n",
				     __FUNCTION__, bo->handle));
				kgem_bo_free(kgem, bo);
			}
		}

		if (rq != &kgem->static_request) {
			list_init(&rq->list);
			__kgem_request_free(rq);
		}
	}

	kgem->nfence = 0;
	kgem->nexec = 0;
	kgem->nreloc = 0;
	kgem->nreloc__self = 0;
	kgem->aperture = 0;
	kgem->aperture_fenced = 0;
	kgem->nbatch = 0;
	kgem->surface = kgem->batch_size;
	kgem->mode = KGEM_NONE;
	kgem->flush = 0;
	kgem->batch_flags = kgem->batch_flags_base;

	kgem->next_request = __kgem_request_alloc(kgem);

	kgem_sna_reset(kgem);
}

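/* Commands grow up from the start of the batch while surface state
 * grows down from the end; when the kernel supports relaxed delta
 * relocations the unused gap in the middle can be dropped and a smaller
 * bo allocated. Worked example (illustrative numbers only): with
 * batch_size=8192, nbatch=300 and surface=7800, n = ALIGN(8192, 1024) =
 * 8192 and size = ALIGN(8192 - 7800 + 300, 1024) = 1024 dwords, so
 * shrink = 7168 dwords and every relocation pointing into the surface
 * area is shifted down by shrink*4 bytes. Returns the allocation size
 * in bytes.
 */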
static int compact_batch_surface(struct kgem *kgem)
{
	int size, shrink, n;

	if (!kgem->has_relaxed_delta)
		return kgem->batch_size * sizeof(uint32_t);

	/* See if we can pack the contents into one or two pages */
	n = ALIGN(kgem->batch_size, 1024);
	size = n - kgem->surface + kgem->nbatch;
	size = ALIGN(size, 1024);

	shrink = n - size;
	if (shrink) {
		DBG(("shrinking from %d to %d\n", kgem->batch_size, size));

		shrink *= sizeof(uint32_t);
		for (n = 0; n < kgem->nreloc; n++) {
			if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
			    kgem->reloc[n].target_handle == ~0U)
				kgem->reloc[n].delta -= shrink;

			if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
				kgem->reloc[n].offset -= shrink;
		}
	}

	return size * sizeof(uint32_t);
}

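/* Choose a bo to hold the batch: prefer one of the pinned 4k/16k batch
 * bos if it is idle (retiring its old request when the kernel has
 * finished with it), stall for one on gen2 (gen == 020) without
 * pinned-batch support, and otherwise fall back to a fresh linear
 * allocation.
 */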
static struct kgem_bo *
kgem_create_batch(struct kgem *kgem, int size)
{
	struct drm_i915_gem_set_domain set_domain;
	struct kgem_bo *bo;

	if (size <= 4096) {
		bo = list_first_entry(&kgem->pinned_batches[0],
				      struct kgem_bo,
				      list);
		if (!bo->rq) {
out_4096:
			list_move_tail(&bo->list, &kgem->pinned_batches[0]);
			return kgem_bo_reference(bo);
		}

		if (!__kgem_busy(kgem, bo->handle)) {
			assert(RQ(bo->rq)->bo == bo);
			__kgem_retire_rq(kgem, RQ(bo->rq));
			goto out_4096;
		}
	}

	if (size <= 16384) {
		bo = list_first_entry(&kgem->pinned_batches[1],
				      struct kgem_bo,
				      list);
		if (!bo->rq) {
out_16384:
			list_move_tail(&bo->list, &kgem->pinned_batches[1]);
			return kgem_bo_reference(bo);
		}

		if (!__kgem_busy(kgem, bo->handle)) {
			assert(RQ(bo->rq)->bo == bo);
			__kgem_retire_rq(kgem, RQ(bo->rq));
			goto out_16384;
		}
	}

	if (kgem->gen == 020 && !kgem->has_pinned_batches) {
		assert(size <= 16384);

		bo = list_first_entry(&kgem->pinned_batches[size > 4096],
				      struct kgem_bo,
				      list);
		list_move_tail(&bo->list, &kgem->pinned_batches[size > 4096]);

		DBG(("%s: syncing due to busy batches\n", __FUNCTION__));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
			kgem_throttle(kgem);
			return NULL;
		}

		kgem_retire(kgem);
		assert(bo->rq == NULL);
		return kgem_bo_reference(bo);
	}

	return kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
}

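/* Close the batch, pick a bo for it and commit the request. Note that
 * in this port the DRM_IOCTL_I915_GEM_EXECBUFFER2 call itself is still
 * commented out below, so the request is committed as if the submission
 * had succeeded.
 */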
void _kgem_submit(struct kgem *kgem)
{
	struct kgem_request *rq;
	uint32_t batch_end;
	int size;

	assert(!DBG_NO_HW);
	assert(!kgem->wedged);

	assert(kgem->nbatch);
	assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
	assert(kgem->nbatch <= kgem->surface);

	batch_end = kgem_end_batch(kgem);
	kgem_sna_flush(kgem);

	DBG(("batch[%d/%d]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
	     kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));

	assert(kgem->nbatch <= kgem->batch_size);
	assert(kgem->nbatch <= kgem->surface);
	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
	assert(kgem->nfence <= kgem->fence_max);

	kgem_finish_buffers(kgem);

#if SHOW_BATCH
	__kgem_batch_debug(kgem, batch_end);
#endif

	rq = kgem->next_request;
	if (kgem->surface != kgem->batch_size)
		size = compact_batch_surface(kgem);
	else
		size = kgem->nbatch * sizeof(kgem->batch[0]);
	rq->bo = kgem_create_batch(kgem, size);
	if (rq->bo) {
		uint32_t handle = rq->bo->handle;
		int i;

		assert(!rq->bo->needs_flush);

		i = kgem->nexec++;
		kgem->exec[i].handle = handle;
		kgem->exec[i].relocation_count = kgem->nreloc;
		kgem->exec[i].relocs_ptr = (uintptr_t)kgem->reloc;
		kgem->exec[i].alignment = 0;
		kgem->exec[i].offset = rq->bo->presumed_offset;
		kgem->exec[i].flags = 0;
		kgem->exec[i].rsvd1 = 0;
		kgem->exec[i].rsvd2 = 0;

		rq->bo->target_handle = kgem->has_handle_lut ? i : handle;
		rq->bo->exec = &kgem->exec[i];
		rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
		list_add(&rq->bo->request, &rq->buffers);
		rq->ring = kgem->ring == KGEM_BLT;

		kgem_fixup_self_relocs(kgem, rq->bo);

		if (kgem_batch_write(kgem, handle, size) == 0) {
			struct drm_i915_gem_execbuffer2 execbuf;
			/* The execbuffer ioctl is not wired up in this port
			 * yet (see the disabled calls below), so assume the
			 * submission succeeded rather than wedging the
			 * driver on an uninitialised error code.
			 */
			int ret = 0, retry = 3;

			VG_CLEAR(execbuf);
			execbuf.buffers_ptr = (uintptr_t)kgem->exec;
			execbuf.buffer_count = kgem->nexec;
			execbuf.batch_start_offset = 0;
			execbuf.batch_len = batch_end*sizeof(uint32_t);
			execbuf.cliprects_ptr = 0;
			execbuf.num_cliprects = 0;
			execbuf.DR1 = 0;
			execbuf.DR4 = 0;
			execbuf.flags = kgem->ring | kgem->batch_flags;
			execbuf.rsvd1 = 0;
			execbuf.rsvd2 = 0;

//			ret = drmIoctl(kgem->fd,
//				       DRM_IOCTL_I915_GEM_EXECBUFFER2,
//				       &execbuf);
//			while (ret == -1 && errno == EBUSY && retry--) {
//				__kgem_throttle(kgem);
//				ret = drmIoctl(kgem->fd,
//					       DRM_IOCTL_I915_GEM_EXECBUFFER2,
//					       &execbuf);
//			}
			(void)retry; /* only used by the disabled ioctl path */
			if (DEBUG_SYNC && ret == 0) {
				struct drm_i915_gem_set_domain set_domain;

				VG_CLEAR(set_domain);
				set_domain.handle = handle;
				set_domain.read_domains = I915_GEM_DOMAIN_GTT;
				set_domain.write_domain = I915_GEM_DOMAIN_GTT;

				ret = drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
			}
			if (ret == -1) {
//				DBG(("%s: GPU hang detected [%d]\n",
//				     __FUNCTION__, errno));
				kgem_throttle(kgem);
				kgem->wedged = true;

#if 0
				ret = errno;
				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);

				for (i = 0; i < kgem->nexec; i++) {
					struct kgem_bo *bo, *found = NULL;

					list_for_each_entry(bo, &kgem->next_request->buffers, request) {
						if (bo->handle == kgem->exec[i].handle) {
							found = bo;
							break;
						}
					}
					ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
					       i,
					       kgem->exec[i].handle,
					       (int)kgem->exec[i].offset,
					       found ? kgem_bo_size(found) : -1,
					       found ? found->tiling : -1,
					       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
					       found ? found->snoop : -1,
					       found ? found->purged : -1);
				}
				for (i = 0; i < kgem->nreloc; i++) {
					ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n",
					       i,
					       (int)kgem->reloc[i].offset,
					       kgem->reloc[i].target_handle,
					       kgem->reloc[i].delta,
					       kgem->reloc[i].read_domains,
					       kgem->reloc[i].write_domain,
					       (int)kgem->reloc[i].presumed_offset);
				}

				if (DEBUG_SYNC) {
					int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
					if (fd != -1) {
						write(fd, kgem->batch, batch_end*sizeof(uint32_t));
						close(fd);
					}

					FatalError("SNA: failed to submit batchbuffer, errno=%d\n", ret);
				}
#endif
			}
		}

		kgem_commit(kgem);
	}
	if (kgem->wedged)
		kgem_cleanup(kgem);

	kgem_reset(kgem);

	assert(kgem->next_request != NULL);
}

void kgem_throttle(struct kgem *kgem)
{
	kgem->need_throttle = 0;
	if (kgem->wedged)
		return;

	kgem->wedged = __kgem_throttle(kgem);
	if (kgem->wedged) {
		printf("Detected a hung GPU, disabling acceleration.\n");
		printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
	}
}

void kgem_purge_cache(struct kgem *kgem)
{
	struct kgem_bo *bo, *next;
	int i;

	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
		list_for_each_entry_safe(bo, next, &kgem->inactive[i], list) {
			if (!kgem_bo_is_retained(kgem, bo)) {
				DBG(("%s: purging %d\n",
				     __FUNCTION__, bo->handle));
				kgem_bo_free(kgem, bo);
			}
		}
	}

	kgem->need_purge = false;
}

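/* Periodic housekeeping: release deferred bo/request frees, trim the
 * large, scanout and snoop caches, then age the inactive caches.
 * Buffers untouched for MAX_INACTIVE_TIME are freed, with mapped bos
 * preserved a little longer (MAP_PRESERVE_TIME). Returns true while
 * there is still something left to expire.
 */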
bool kgem_expire_cache(struct kgem *kgem)
{
	time_t now, expire;
	struct kgem_bo *bo;
	unsigned int size = 0, count = 0;
	bool idle;
	unsigned int i;

	time(&now);

	while (__kgem_freed_bo) {
		bo = __kgem_freed_bo;
		__kgem_freed_bo = *(struct kgem_bo **)bo;
		free(bo);
	}

	while (__kgem_freed_request) {
		struct kgem_request *rq = __kgem_freed_request;
		__kgem_freed_request = *(struct kgem_request **)rq;
		free(rq);
	}

	while (!list_is_empty(&kgem->large_inactive)) {
		kgem_bo_free(kgem,
			     list_first_entry(&kgem->large_inactive,
					      struct kgem_bo, list));
	}

	while (!list_is_empty(&kgem->scanout)) {
		bo = list_first_entry(&kgem->scanout, struct kgem_bo, list);
		if (__kgem_busy(kgem, bo->handle))
			break;

		list_del(&bo->list);
		kgem_bo_clear_scanout(kgem, bo);
		__kgem_bo_destroy(kgem, bo);
	}

	expire = 0;
	list_for_each_entry(bo, &kgem->snoop, list) {
		if (bo->delta) {
			expire = now - MAX_INACTIVE_TIME/2;
			break;
		}

		bo->delta = now;
	}
	if (expire) {
		while (!list_is_empty(&kgem->snoop)) {
			bo = list_last_entry(&kgem->snoop, struct kgem_bo, list);

			if (bo->delta > expire)
				break;

			kgem_bo_free(kgem, bo);
		}
	}
#ifdef DEBUG_MEMORY
	{
		long snoop_size = 0;
		int snoop_count = 0;
		list_for_each_entry(bo, &kgem->snoop, list)
			snoop_count++, snoop_size += bytes(bo);
		ErrorF("%s: still allocated %d bo, %ld bytes, in snoop cache\n",
		       __FUNCTION__, snoop_count, snoop_size);
	}
#endif

	kgem_retire(kgem);
	if (kgem->wedged)
		kgem_cleanup(kgem);

	kgem->expire(kgem);

	if (kgem->need_purge)
		kgem_purge_cache(kgem);

	expire = 0;

	idle = !kgem->need_retire;
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
		idle &= list_is_empty(&kgem->inactive[i]);
		list_for_each_entry(bo, &kgem->inactive[i], list) {
			if (bo->delta) {
				expire = now - MAX_INACTIVE_TIME;
				break;
			}

			bo->delta = now;
		}
	}
	if (idle) {
		DBG(("%s: idle\n", __FUNCTION__));
		kgem->need_expire = false;
		return false;
	}
	if (expire == 0)
		return true;

	idle = !kgem->need_retire;
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
		struct list preserve;

		list_init(&preserve);
		while (!list_is_empty(&kgem->inactive[i])) {
			bo = list_last_entry(&kgem->inactive[i],
					     struct kgem_bo, list);

			if (bo->delta > expire) {
				idle = false;
				break;
			}

			if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) {
				idle = false;
				list_move_tail(&bo->list, &preserve);
			} else {
				count++;
				size += bytes(bo);
				kgem_bo_free(kgem, bo);
				DBG(("%s: expiring %d\n",
				     __FUNCTION__, bo->handle));
			}
		}
		if (!list_is_empty(&preserve)) {
			preserve.prev->next = kgem->inactive[i].next;
			kgem->inactive[i].next->prev = preserve.prev;
			kgem->inactive[i].next = preserve.next;
			preserve.next->prev = &kgem->inactive[i];
		}
	}

#ifdef DEBUG_MEMORY
	{
		long inactive_size = 0;
		int inactive_count = 0;
		for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
			list_for_each_entry(bo, &kgem->inactive[i], list)
				inactive_count++, inactive_size += bytes(bo);
		ErrorF("%s: still allocated %d bo, %ld bytes, in inactive cache\n",
		       __FUNCTION__, inactive_count, inactive_size);
	}
#endif

	DBG(("%s: expired %d objects, %d bytes, idle? %d\n",
	     __FUNCTION__, count, size, idle));

	kgem->need_expire = !idle;
	return !idle;
	(void)count;
	(void)size;
}

void kgem_cleanup_cache(struct kgem *kgem)
{
	unsigned int i;
	int n;

	/* sync to the most recent request */
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
		if (!list_is_empty(&kgem->requests[n])) {
			struct kgem_request *rq;
			struct drm_i915_gem_set_domain set_domain;

			rq = list_first_entry(&kgem->requests[n],
					      struct kgem_request,
					      list);

			DBG(("%s: sync on cleanup\n", __FUNCTION__));

			VG_CLEAR(set_domain);
			set_domain.handle = rq->bo->handle;
			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
			(void)drmIoctl(kgem->fd,
				       DRM_IOCTL_I915_GEM_SET_DOMAIN,
				       &set_domain);
		}
	}

	kgem_retire(kgem);
	kgem_cleanup(kgem);

	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
		while (!list_is_empty(&kgem->inactive[i]))
			kgem_bo_free(kgem,
				     list_last_entry(&kgem->inactive[i],
						     struct kgem_bo, list));
	}

	while (!list_is_empty(&kgem->snoop))
		kgem_bo_free(kgem,
			     list_last_entry(&kgem->snoop,
					     struct kgem_bo, list));

	while (__kgem_freed_bo) {
		struct kgem_bo *bo = __kgem_freed_bo;
		__kgem_freed_bo = *(struct kgem_bo **)bo;
		free(bo);
	}

	kgem->need_purge = false;
	kgem->need_expire = false;
}

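/* Search the caches for a reusable untiled bo of at least num_pages.
 * CREATE_INACTIVE restricts the search to idle bos; otherwise the
 * active cache is scanned as well. A near-miss (wrong map type) is
 * remembered in 'first' and returned if no exact match turns up, and
 * tiled candidates are converted back to I915_TILING_NONE on the fly.
 */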
static struct kgem_bo *
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
{
	struct kgem_bo *bo, *first = NULL;
	bool use_active = (flags & CREATE_INACTIVE) == 0;
	struct list *cache;

	DBG(("%s: num_pages=%d, flags=%x, use_active? %d\n",
	     __FUNCTION__, num_pages, flags, use_active));

	if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
		return NULL;

	if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
		DBG(("%s: inactive and cache bucket empty\n",
		     __FUNCTION__));

		if (flags & CREATE_NO_RETIRE) {
			DBG(("%s: can not retire\n", __FUNCTION__));
			return NULL;
		}

		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) {
			DBG(("%s: active cache bucket empty\n", __FUNCTION__));
			return NULL;
		}

		if (!__kgem_throttle_retire(kgem, flags)) {
			DBG(("%s: nothing retired\n", __FUNCTION__));
			return NULL;
		}

		if (list_is_empty(inactive(kgem, num_pages))) {
			DBG(("%s: active cache bucket still empty after retire\n",
			     __FUNCTION__));
			return NULL;
		}
	}

	if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
		int for_cpu = !!(flags & CREATE_CPU_MAP);
		DBG(("%s: searching for inactive %s map\n",
		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
		list_for_each_entry(bo, cache, vma) {
			assert(IS_CPU_MAP(bo->map) == for_cpu);
			assert(bucket(bo) == cache_bucket(num_pages));
			assert(bo->proxy == NULL);
			assert(bo->rq == NULL);
			assert(bo->exec == NULL);
			assert(!bo->scanout);

			if (num_pages > num_pages(bo)) {
				DBG(("inactive too small: %d < %d\n",
				     num_pages(bo), num_pages));
				continue;
			}

			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
				kgem_bo_free(kgem, bo);
				break;
			}

			if (I915_TILING_NONE != bo->tiling &&
			    !gem_set_tiling(kgem->fd, bo->handle,
					    I915_TILING_NONE, 0))
				continue;

			kgem_bo_remove_from_inactive(kgem, bo);

			bo->tiling = I915_TILING_NONE;
			bo->pitch = 0;
			bo->delta = 0;
			DBG(("  %s: found handle=%d (num_pages=%d) in linear vma cache\n",
			     __FUNCTION__, bo->handle, num_pages(bo)));
			assert(use_active || bo->domain != DOMAIN_GPU);
			assert(!bo->needs_flush);
			ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
			return bo;
		}

		if (flags & CREATE_EXACT)
			return NULL;

		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
			return NULL;
	}

	cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages);
	list_for_each_entry(bo, cache, list) {
		assert(bo->refcnt == 0);
		assert(bo->reusable);
		assert(!!bo->rq == !!use_active);
		assert(bo->proxy == NULL);
		assert(!bo->scanout);

		if (num_pages > num_pages(bo))
			continue;

		if (use_active &&
		    kgem->gen <= 040 &&
		    bo->tiling != I915_TILING_NONE)
			continue;

		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
			kgem_bo_free(kgem, bo);
			break;
		}

		if (I915_TILING_NONE != bo->tiling) {
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP))
				continue;

			if (first)
				continue;

			if (!gem_set_tiling(kgem->fd, bo->handle,
					    I915_TILING_NONE, 0))
				continue;

			bo->tiling = I915_TILING_NONE;
			bo->pitch = 0;
		}

		if (bo->map) {
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
				int for_cpu = !!(flags & CREATE_CPU_MAP);
				if (IS_CPU_MAP(bo->map) != for_cpu) {
					if (first != NULL)
						break;

					first = bo;
					continue;
				}
			} else {
				if (first != NULL)
					break;

				first = bo;
				continue;
			}
		} else {
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
				if (first != NULL)
					break;

				first = bo;
				continue;
			}
		}

		if (use_active)
			kgem_bo_remove_from_active(kgem, bo);
		else
			kgem_bo_remove_from_inactive(kgem, bo);

		assert(bo->tiling == I915_TILING_NONE);
		bo->pitch = 0;
		bo->delta = 0;
		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
		     __FUNCTION__, bo->handle, num_pages(bo),
		     use_active ? "active" : "inactive"));
		assert(list_is_empty(&bo->list));
		assert(use_active || bo->domain != DOMAIN_GPU);
		assert(!bo->needs_flush || use_active);
		ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
		return bo;
	}

	if (first) {
		assert(first->tiling == I915_TILING_NONE);

		if (use_active)
			kgem_bo_remove_from_active(kgem, first);
		else
			kgem_bo_remove_from_inactive(kgem, first);

		first->pitch = 0;
		first->delta = 0;
		DBG(("  %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
		     __FUNCTION__, first->handle, num_pages(first),
		     use_active ? "active" : "inactive"));
		assert(list_is_empty(&first->list));
		assert(use_active || first->domain != DOMAIN_GPU);
		assert(!first->needs_flush || use_active);
		ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active);
		return first;
	}

	return NULL;
}

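/* Allocate a linear bo of 'size' bytes, preferring a recycled bo from
 * the inactive cache. On LLC hardware a GTT-map request is converted to
 * a CPU map, the two being coherent there; CREATE_CACHED returns NULL
 * rather than allocating on a cache miss.
 */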
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
{
	struct kgem_bo *bo;
	uint32_t handle;

	DBG(("%s(%d)\n", __FUNCTION__, size));

	if (flags & CREATE_GTT_MAP && kgem->has_llc) {
		flags &= ~CREATE_GTT_MAP;
		flags |= CREATE_CPU_MAP;
	}

	size = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags);
	if (bo) {
		assert(bo->domain != DOMAIN_GPU);
		ASSERT_IDLE(kgem, bo->handle);
		bo->refcnt = 1;
		return bo;
	}

	if (flags & CREATE_CACHED)
		return NULL;

	handle = gem_create(kgem->fd, size);
	if (handle == 0)
		return NULL;

	DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size));
	bo = __kgem_bo_alloc(handle, size);
	if (bo == NULL) {
		gem_close(kgem->fd, handle);
		return NULL;
	}

	debug_alloc__bo(kgem, bo);
	return bo;
}

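/* Pre-gen4 fence registers cover power-of-two sized regions (at least
 * 512KiB on gen2, 1MiB on gen3), so a tiled bo consumes aperture up to
 * the next power of two. For example, a 1.5MiB tiled surface on 915
 * occupies a 2MiB fence region.
 */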
inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
{
	unsigned int size;

	assert(bo->tiling);
	assert(kgem->gen < 040);

	if (kgem->gen < 030)
		size = 512 * 1024;
	else
		size = 1024 * 1024;
	while (size < bytes(bo))
		size *= 2;

	return size;
}

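/* kgem_create_2d(), the tiled/2D allocator with its full cache search,
 * is compiled out in this port; the reference implementation is kept
 * below.
 */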
3258 | Serge | 2901 | #if 0 |
2902 | |||
2903 | struct kgem_bo *kgem_create_2d(struct kgem *kgem, |
||
2904 | int width, |
||
2905 | int height, |
||
2906 | int bpp, |
||
2907 | int tiling, |
||
2908 | uint32_t flags) |
||
2909 | { |
||
2910 | struct list *cache; |
||
2911 | struct kgem_bo *bo; |
||
2912 | uint32_t pitch, untiled_pitch, tiled_height, size; |
||
2913 | uint32_t handle; |
||
2914 | int i, bucket, retry; |
||
2915 | |||
2916 | if (tiling < 0) |
||
2917 | tiling = -tiling, flags |= CREATE_EXACT; |
||
2918 | |||
2919 | DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__, |
||
2920 | width, height, bpp, tiling, |
||
2921 | !!(flags & CREATE_EXACT), |
||
2922 | !!(flags & CREATE_INACTIVE), |
||
2923 | !!(flags & CREATE_CPU_MAP), |
||
2924 | !!(flags & CREATE_GTT_MAP), |
||
2925 | !!(flags & CREATE_SCANOUT), |
||
2926 | !!(flags & CREATE_PRIME), |
||
2927 | !!(flags & CREATE_TEMPORARY))); |
||
2928 | |||
2929 | size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags, |
||
2930 | width, height, bpp, tiling, &pitch); |
||
2931 | assert(size && size <= kgem->max_object_size); |
||
2932 | size /= PAGE_SIZE; |
||
2933 | bucket = cache_bucket(size); |
||
2934 | |||
2935 | if (flags & CREATE_SCANOUT) { |
||
2936 | assert((flags & CREATE_INACTIVE) == 0); |
||
2937 | list_for_each_entry_reverse(bo, &kgem->scanout, list) { |
||
2938 | assert(bo->scanout); |
||
2939 | assert(bo->delta); |
||
2940 | assert(!bo->purged); |
||
2941 | |||
2942 | if (size > num_pages(bo) || num_pages(bo) > 2*size) |
||
2943 | continue; |
||
2944 | |||
2945 | if (bo->tiling != tiling || |
||
2946 | (tiling != I915_TILING_NONE && bo->pitch != pitch)) { |
||
2947 | if (!gem_set_tiling(kgem->fd, bo->handle, |
||
2948 | tiling, pitch)) |
||
2949 | continue; |
||
2950 | |||
2951 | bo->tiling = tiling; |
||
2952 | bo->pitch = pitch; |
||
2953 | } |
||
2954 | |||
2955 | list_del(&bo->list); |
||
2956 | |||
2957 | bo->unique_id = kgem_get_unique_id(kgem); |
||
2958 | DBG((" 1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n", |
||
2959 | bo->pitch, bo->tiling, bo->handle, bo->unique_id)); |
||
2960 | assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo)); |
||
2961 | bo->refcnt = 1; |
||
2962 | return bo; |
||
2963 | } |
||
2964 | } |
||
2965 | |||
2966 | if (bucket >= NUM_CACHE_BUCKETS) { |
||
2967 | DBG(("%s: large bo num pages=%d, bucket=%d\n", |
||
2968 | __FUNCTION__, size, bucket)); |
||
2969 | |||
2970 | if (flags & CREATE_INACTIVE) |
||
2971 | goto large_inactive; |
||
2972 | |||
2973 | tiled_height = kgem_aligned_height(kgem, height, tiling); |
||
2974 | untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags); |
||
2975 | |||
2976 | list_for_each_entry(bo, &kgem->large, list) { |
||
2977 | assert(!bo->purged); |
||
2978 | assert(!bo->scanout); |
||
2979 | assert(bo->refcnt == 0); |
||
2980 | assert(bo->reusable); |
||
2981 | assert(bo->flush == true); |
||
2982 | |||
2983 | if (kgem->gen < 040) { |
||
2984 | if (bo->pitch < pitch) { |
||
2985 | DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n", |
||
2986 | bo->tiling, tiling, |
||
2987 | bo->pitch, pitch)); |
||
2988 | continue; |
||
2989 | } |
||
2990 | |||
2991 | if (bo->pitch * tiled_height > bytes(bo)) |
||
2992 | continue; |
||
2993 | } else { |
||
2994 | if (num_pages(bo) < size) |
||
2995 | continue; |
||
2996 | |||
2997 | if (bo->pitch != pitch || bo->tiling != tiling) { |
||
2998 | if (!gem_set_tiling(kgem->fd, bo->handle, |
||
2999 | tiling, pitch)) |
||
3000 | continue; |
||
3001 | |||
3002 | bo->pitch = pitch; |
||
3003 | bo->tiling = tiling; |
||
3004 | } |
||
3005 | } |
||
3006 | |||
3007 | kgem_bo_remove_from_active(kgem, bo); |
||
3008 | |||
3009 | bo->unique_id = kgem_get_unique_id(kgem); |
||
3010 | bo->delta = 0; |
||
3011 | DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n", |
||
3012 | bo->pitch, bo->tiling, bo->handle, bo->unique_id)); |
||
3013 | assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo)); |
||
3014 | bo->refcnt = 1; |
||
3015 | return bo; |
||
3016 | } |
||
3017 | |||
3018 | large_inactive: |
||
3019 | list_for_each_entry(bo, &kgem->large_inactive, list) { |
||
3020 | assert(bo->refcnt == 0); |
||
3021 | assert(bo->reusable); |
||
3022 | assert(!bo->scanout); |
||
3023 | |||
3024 | if (size > num_pages(bo)) |
||
3025 | continue; |
||
3026 | |||
3027 | if (bo->tiling != tiling || |
||
3028 | (tiling != I915_TILING_NONE && bo->pitch != pitch)) { |
||
3029 | if (!gem_set_tiling(kgem->fd, bo->handle, |
||
3030 | tiling, pitch)) |
||
3031 | continue; |
||
3032 | |||
3033 | bo->tiling = tiling; |
||
3034 | bo->pitch = pitch; |
||
3035 | } |
||
3036 | |||
3037 | if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) { |
||
3038 | kgem_bo_free(kgem, bo); |
||
3039 | break; |
||
3040 | } |
||
3041 | |||
3042 | list_del(&bo->list); |
||
3043 | |||
3044 | bo->unique_id = kgem_get_unique_id(kgem); |
||
3045 | bo->pitch = pitch; |
||
3046 | bo->delta = 0; |
||
3047 | DBG((" 1:from large inactive: pitch=%d, tiling=%d, handle=%d, id=%d\n", |
||
3048 | bo->pitch, bo->tiling, bo->handle, bo->unique_id)); |
||
3049 | assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo)); |
||
3050 | bo->refcnt = 1; |
||
3051 | return bo; |
||
3052 | } |
||
3053 | |||
3054 | goto create; |
||
3055 | } |
||
3056 | |||
3057 | if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { |
||
3058 | int for_cpu = !!(flags & CREATE_CPU_MAP); |
||
3059 | if (kgem->has_llc && tiling == I915_TILING_NONE) |
||
3060 | for_cpu = 1; |
||
3061 | /* We presume that we will need to upload to this bo, |
||
3062 | * and so would prefer to have an active VMA. |
||
3063 | */ |
||
3064 | cache = &kgem->vma[for_cpu].inactive[bucket]; |
||
3065 | do { |
||
3066 | list_for_each_entry(bo, cache, vma) { |
||
3067 | assert(bucket(bo) == bucket); |
||
3068 | assert(bo->refcnt == 0); |
||
3069 | assert(!bo->scanout); |
||
3070 | assert(bo->map); |
||
3071 | assert(IS_CPU_MAP(bo->map) == for_cpu); |
||
3072 | assert(bo->rq == NULL); |
||
3073 | assert(list_is_empty(&bo->request)); |
||
3074 | assert(bo->flush == false); |
||
3075 | |||
3076 | if (size > num_pages(bo)) { |
||
3077 | DBG(("inactive too small: %d < %d\n", |
||
3078 | num_pages(bo), size)); |
||
3079 | continue; |
||
3080 | } |
||
3081 | |||
3082 | if (bo->tiling != tiling || |
||
3083 | (tiling != I915_TILING_NONE && bo->pitch != pitch)) { |
||
3084 | DBG(("inactive vma with wrong tiling: %d < %d\n", |
||
3085 | bo->tiling, tiling)); |
||
3086 | continue; |
||
3087 | } |
||
3088 | |||
3089 | if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) { |
||
3090 | kgem_bo_free(kgem, bo); |
||
3091 | break; |
||
3092 | } |
||
3093 | |||
3094 | bo->pitch = pitch; |
||
3095 | bo->delta = 0; |
||
3096 | bo->unique_id = kgem_get_unique_id(kgem); |
||
3097 | |||
3098 | kgem_bo_remove_from_inactive(kgem, bo); |
||
3099 | |||
3100 | DBG((" from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n", |
||
3101 | bo->pitch, bo->tiling, bo->handle, bo->unique_id)); |
||
3102 | assert(bo->reusable); |
||
3103 | assert(bo->domain != DOMAIN_GPU); |
||
3104 | ASSERT_IDLE(kgem, bo->handle); |
||
3105 | assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo)); |
||
3106 | bo->refcnt = 1; |
||
3107 | return bo; |
||
3108 | } |
||
3109 | } while (!list_is_empty(cache) && |
||
3110 | __kgem_throttle_retire(kgem, flags)); |
||
3111 | |||
3112 | if (flags & CREATE_CPU_MAP && !kgem->has_llc) |
||
3113 | goto create; |
||
3114 | } |
||
3115 | |||
3116 | if (flags & CREATE_INACTIVE) |
||
3117 | goto skip_active_search; |
||
3118 | |||
3119 | /* Best active match */ |
||
3120 | retry = NUM_CACHE_BUCKETS - bucket; |
||
3121 | if (retry > 3 && (flags & CREATE_TEMPORARY) == 0) |
||
3122 | retry = 3; |
||
3123 | search_again: |
||
3124 | assert(bucket < NUM_CACHE_BUCKETS); |
||
3125 | cache = &kgem->active[bucket][tiling]; |
||
3126 | if (tiling) { |
||
3127 | tiled_height = kgem_aligned_height(kgem, height, tiling); |
||
3128 | list_for_each_entry(bo, cache, list) { |
||
3129 | assert(!bo->purged); |
||
3130 | assert(bo->refcnt == 0); |
||
3131 | assert(bucket(bo) == bucket); |
||
3132 | assert(bo->reusable); |
||
3133 | assert(bo->tiling == tiling); |
||
3134 | assert(bo->flush == false); |
||
3135 | assert(!bo->scanout); |
||
3136 | |||
3137 | if (kgem->gen < 040) { |
||
3138 | if (bo->pitch < pitch) { |
||
3139 | DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n", |
||
3140 | bo->tiling, tiling, |
||
3141 | bo->pitch, pitch)); |
||
3142 | continue; |
||
3143 | } |
||
3144 | |||
3145 | if (bo->pitch * tiled_height > bytes(bo)) |
||
3146 | continue; |
||
3147 | } else { |
||
3148 | if (num_pages(bo) < size) |
||
3149 | continue; |
||
3150 | |||
3151 | if (bo->pitch != pitch) { |
||
3152 | if (!gem_set_tiling(kgem->fd, |
||
3153 | bo->handle, |
||
3154 | tiling, pitch)) |
||
3155 | continue; |
||
3156 | |||
3157 | bo->pitch = pitch; |
||
3158 | } |
||
3159 | } |
||

			kgem_bo_remove_from_active(kgem, bo);

			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			bo->refcnt = 1;
			return bo;
		}
	} else {
		list_for_each_entry(bo, cache, list) {
			assert(bucket(bo) == bucket);
			assert(!bo->purged);
			assert(bo->refcnt == 0);
			assert(bo->reusable);
			assert(!bo->scanout);
			assert(bo->tiling == tiling);
			assert(bo->flush == false);

			if (num_pages(bo) < size)
				continue;

			kgem_bo_remove_from_active(kgem, bo);

			bo->pitch = pitch;
			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			bo->refcnt = 1;
			return bo;
		}
	}

	if (--retry && flags & CREATE_EXACT) {
		if (kgem->gen >= 040) {
			for (i = I915_TILING_NONE; i <= I915_TILING_Y; i++) {
				if (i == tiling)
					continue;

				cache = &kgem->active[bucket][i];
				list_for_each_entry(bo, cache, list) {
					assert(!bo->purged);
					assert(bo->refcnt == 0);
					assert(bo->reusable);
					assert(!bo->scanout);
					assert(bo->flush == false);

					if (num_pages(bo) < size)
						continue;

					if (!gem_set_tiling(kgem->fd,
							    bo->handle,
							    tiling, pitch))
						continue;

					kgem_bo_remove_from_active(kgem, bo);

					bo->unique_id = kgem_get_unique_id(kgem);
					bo->pitch = pitch;
					bo->tiling = tiling;
					bo->delta = 0;
					DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
					assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
					bo->refcnt = 1;
					return bo;
				}
			}
		}

		bucket++;
		goto search_again;
	}

	if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
		i = tiling;
		while (--i >= 0) {
			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
							 width, height, bpp, tiling, &pitch);
			cache = active(kgem, tiled_height / PAGE_SIZE, i);
			tiled_height = kgem_aligned_height(kgem, height, i);
			list_for_each_entry(bo, cache, list) {
				assert(!bo->purged);
				assert(bo->refcnt == 0);
				assert(bo->reusable);
				assert(!bo->scanout);
				assert(bo->flush == false);

				if (bo->tiling) {
					if (bo->pitch < pitch) {
						DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
						     bo->tiling, tiling,
						     bo->pitch, pitch));
						continue;
					}
				} else
					bo->pitch = untiled_pitch;

				if (bo->pitch * tiled_height > bytes(bo))
					continue;

				kgem_bo_remove_from_active(kgem, bo);

				bo->unique_id = kgem_get_unique_id(kgem);
				bo->delta = 0;
				DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
				bo->refcnt = 1;
				return bo;
			}
		}
	}

skip_active_search:
	bucket = cache_bucket(size);
	retry = NUM_CACHE_BUCKETS - bucket;
	if (retry > 3)
		retry = 3;
search_inactive:
	/* Now just look for a close match and prefer any currently active */
	assert(bucket < NUM_CACHE_BUCKETS);
	cache = &kgem->inactive[bucket];
	list_for_each_entry(bo, cache, list) {
		assert(bucket(bo) == bucket);
		assert(bo->reusable);
		assert(!bo->scanout);
		assert(bo->flush == false);

		if (size > num_pages(bo)) {
			DBG(("inactive too small: %d < %d\n",
			     num_pages(bo), size));
			continue;
		}

		if (bo->tiling != tiling ||
		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
			if (!gem_set_tiling(kgem->fd, bo->handle,
					    tiling, pitch))
				continue;

			if (bo->map)
				kgem_bo_release_map(kgem, bo);
		}

		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
			kgem_bo_free(kgem, bo);
			break;
		}

		kgem_bo_remove_from_inactive(kgem, bo);

		bo->pitch = pitch;
		bo->tiling = tiling;

		bo->delta = 0;
		bo->unique_id = kgem_get_unique_id(kgem);
		assert(bo->pitch);
		DBG((" from inactive: pitch=%d, tiling=%d: handle=%d, id=%d\n",
		     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
		assert(bo->refcnt == 0);
		assert(bo->reusable);
		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
		ASSERT_MAYBE_IDLE(kgem, bo->handle, flags & CREATE_INACTIVE);
		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
		bo->refcnt = 1;
		return bo;
	}

	if (flags & CREATE_INACTIVE &&
	    !list_is_empty(&kgem->active[bucket][tiling]) &&
	    __kgem_throttle_retire(kgem, flags)) {
		flags &= ~CREATE_INACTIVE;
		goto search_inactive;
	}

	if (--retry) {
		bucket++;
		flags &= ~CREATE_INACTIVE;
		goto search_inactive;
	}

create:
	if (bucket >= NUM_CACHE_BUCKETS)
		size = ALIGN(size, 1024);
	handle = gem_create(kgem->fd, size);
	if (handle == 0)
		return NULL;

	bo = __kgem_bo_alloc(handle, size);
	if (!bo) {
		gem_close(kgem->fd, handle);
		return NULL;
	}

	bo->domain = DOMAIN_CPU;
	bo->unique_id = kgem_get_unique_id(kgem);
	bo->pitch = pitch;
	if (tiling != I915_TILING_NONE &&
	    gem_set_tiling(kgem->fd, handle, tiling, pitch))
		bo->tiling = tiling;
	if (bucket >= NUM_CACHE_BUCKETS) {
		DBG(("%s: marking large bo for automatic flushing\n",
		     __FUNCTION__));
		bo->flush = true;
	}

	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));

	debug_alloc__bo(kgem, bo);

	DBG((" new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
	     size, num_pages(bo), bucket(bo)));
	return bo;
}

/* Create a linear 2D buffer that is cheap for the CPU to access,
 * preferring (in order): an ordinary bo with a CPU map on LLC systems,
 * a recycled bo from the snoop cache, a freshly snooped linear bo, and
 * finally a userptr wrapper around malloc'ed memory. */
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags)
{
	struct kgem_bo *bo;
	int stride, size;

	if (DBG_NO_CPU)
		return NULL;

	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));

	if (kgem->has_llc) {
		bo = kgem_create_2d(kgem, width, height, bpp,
				    I915_TILING_NONE, flags);
		if (bo == NULL)
			return bo;

		assert(bo->tiling == I915_TILING_NONE);

		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}

		return bo;
	}

	assert(width > 0 && height > 0);
	stride = ALIGN(width, 2) * bpp >> 3;
	stride = ALIGN(stride, 4);
	size = stride * ALIGN(height, 2);
	assert(size >= PAGE_SIZE);

	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
	     __FUNCTION__, width, height, bpp, stride));

	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
	if (bo) {
		assert(bo->tiling == I915_TILING_NONE);
		assert(bo->snoop);
		bo->refcnt = 1;
		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	if (kgem->has_cacheing) {
		bo = kgem_create_linear(kgem, size, flags);
		if (bo == NULL)
			return NULL;

		assert(bo->tiling == I915_TILING_NONE);

		if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED)) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}
		bo->snoop = true;

		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}

		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	if (kgem->has_userptr) {
		void *ptr;

		/* XXX */
		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
			return NULL;

		bo = kgem_create_map(kgem, ptr, size, false);
		if (bo == NULL) {
			free(ptr);
			return NULL;
		}

		bo->map = MAKE_USER_MAP(ptr);
		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	return NULL;
}
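
/* A hedged usage sketch (not part of the driver): allocate a CPU
 * surface via the fallbacks above, fill it through its CPU map, then
 * drop the reference.  The 64x64x32bpp geometry and the memset() fill
 * are illustrative assumptions only. */
#if 0
static void example_cpu_2d_usage(struct kgem *kgem)
{
	struct kgem_bo *bo;
	void *ptr;

	bo = kgem_create_cpu_2d(kgem, 64, 64, 32, 0);
	if (bo == NULL)
		return;

	ptr = kgem_bo_map__cpu(kgem, bo);	/* returns the cached map */
	if (ptr) {
		kgem_bo_sync__cpu(kgem, bo);	/* move into the CPU domain */
		memset(ptr, 0, bo->pitch * 64);	/* stride is bo->pitch */
	}

	kgem_bo_destroy(kgem, bo);	/* may be returned to the snoop cache */
}
#endif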

#endif

/* Final unref: a proxy borrows its parent's handle, so it is simply
 * unlinked and the reference on its parent dropped; real bo are handed
 * to __kgem_bo_destroy() to be cached or released. */
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, proxy? %d\n",
	     __FUNCTION__, bo->handle, bo->proxy != NULL));

	if (bo->proxy) {
		_list_del(&bo->vma);
		_list_del(&bo->request);
		if (bo->io && bo->exec == NULL)
			_kgem_bo_delete_buffer(kgem, bo);
		kgem_bo_unref(kgem, bo->proxy);
		kgem_bo_binding_free(kgem, bo);
		free(bo);
		return;
	}

	__kgem_bo_destroy(kgem, bo);
}
/* Append a relocation for the batch dword at pos: ensure the target bo
 * (or the parent of a proxy) is part of the current request, track
 * fence requirements on pre-gen4, and return the presumed address
 * (presumed_offset + delta) to be written into the batch. */
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domain,
			uint32_t delta)
{
	int index;

	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));

	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);

	index = kgem->nreloc++;
	assert(index < ARRAY_SIZE(kgem->reloc));
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
	if (bo) {
		assert(bo->refcnt);
		assert(!bo->purged);

		while (bo->proxy) {
			DBG(("%s: adding proxy [delta=%d] for handle=%d\n",
			     __FUNCTION__, bo->delta, bo->handle));
			delta += bo->delta;
			assert(bo->handle == bo->proxy->handle);
			/* need to release the cache upon batch submit */
			if (bo->exec == NULL) {
				list_move_tail(&bo->request,
					       &kgem->next_request->buffers);
				bo->rq = MAKE_REQUEST(kgem->next_request,
						      kgem->ring);
				bo->exec = &_kgem_dummy_exec;
			}

			if (read_write_domain & 0x7fff && !bo->dirty)
				__kgem_bo_mark_dirty(bo);

			bo = bo->proxy;
			assert(bo->refcnt);
			assert(!bo->purged);
		}

		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);
		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
		assert(RQ_RING(bo->rq) == kgem->ring);

		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
			if (bo->tiling &&
			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
				assert(kgem->nfence < kgem->fence_max);
				kgem->aperture_fenced +=
					kgem_bo_fenced_size(kgem, bo);
				kgem->nfence++;
			}
			bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
		}

		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = bo->target_handle;
		kgem->reloc[index].presumed_offset = bo->presumed_offset;

		if (read_write_domain & 0x7fff && !bo->dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}

		delta += bo->presumed_offset;
	} else {
		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = ~0U;
		kgem->reloc[index].presumed_offset = 0;
		if (kgem->nreloc__self < 256)
			kgem->reloc__self[kgem->nreloc__self++] = index;
	}
	kgem->reloc[index].read_domains = read_write_domain >> 16;
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;

	return delta;
}
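
/* A hedged sketch (not part of the driver) of how callers consume
 * kgem_add_reloc(): the return value is written straight into the
 * batch at the same dword that the relocation entry points at, so the
 * kernel only rewrites it if the presumed offset turns out wrong.  The
 * zero command opcode and the render domains are illustrative
 * assumptions. */
#if 0
static void example_emit_reloc(struct kgem *kgem, struct kgem_bo *target)
{
	uint32_t *b = kgem->batch + kgem->nbatch;

	b[0] = 0;	/* hypothetical command opcode */
	b[1] = kgem_add_reloc(kgem, kgem->nbatch + 1, target,
			      I915_GEM_DOMAIN_RENDER << 16 |
			      I915_GEM_DOMAIN_RENDER,
			      0);
	kgem->nbatch += 2;
}
#endif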

static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
{
	int i, j;

	DBG(("%s: type=%d, count=%d (bucket: %d)\n",
	     __FUNCTION__, type, kgem->vma[type].count, bucket));
	if (kgem->vma[type].count <= 0)
		return;

	if (kgem->need_purge)
		kgem_purge_cache(kgem);

	/* vma are limited on a per-process basis to around 64k.
	 * This includes all malloc arenas as well as other file
	 * mappings. In order to be fair and not hog the cache,
	 * and more importantly not to exhaust that limit and to
	 * start failing mappings, we keep our own number of open
	 * vma to within a conservative value.
	 */
	i = 0;
	while (kgem->vma[type].count > 0) {
		struct kgem_bo *bo = NULL;

		for (j = 0;
		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
		     j++) {
			struct list *head = &kgem->vma[type].inactive[i++%ARRAY_SIZE(kgem->vma[type].inactive)];
			if (!list_is_empty(head))
				bo = list_last_entry(head, struct kgem_bo, vma);
		}
		if (bo == NULL)
			break;

		DBG(("%s: discarding inactive %s vma cache for %d\n",
		     __FUNCTION__,
		     IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle));
		assert(IS_CPU_MAP(bo->map) == type);
		assert(bo->map);
		assert(bo->rq == NULL);

		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
//		munmap(MAP(bo->map), bytes(bo));
		bo->map = NULL;
		list_del(&bo->vma);
		kgem->vma[type].count--;

		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo)) {
			DBG(("%s: freeing unpurgeable old mapping\n",
			     __FUNCTION__));
			kgem_bo_free(kgem, bo);
		}
	}
}
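
/* The "around 64k" in the comment above matches the Linux
 * vm.max_map_count default (65530).  A hedged, Linux-only sketch of
 * querying the real limit at runtime; purely illustrative, as this
 * port has no such procfs interface. */
#if 0
#include <stdio.h>

static int example_max_map_count(void)
{
	FILE *f = fopen("/proc/sys/vm/max_map_count", "r");
	int limit = 65530;	/* kernel default */

	if (f) {
		if (fscanf(f, "%d", &limit) != 1)
			limit = 65530;
		fclose(f);
	}
	return limit;
}
#endif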

/* Map the bo for direct access, converting the request into a cached
 * CPU map for linear buffers on LLC systems (or when the bo is already
 * in the CPU domain), and otherwise returning a cached GTT map after
 * moving the bo into the GTT domain. */
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->proxy == NULL);
	assert(list_is_empty(&bo->list));
	assert(bo->exec == NULL);

	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
		DBG(("%s: converting request for GTT map into CPU map\n",
		     __FUNCTION__));
		ptr = kgem_bo_map__cpu(kgem, bo);
		kgem_bo_sync__cpu(kgem, bo);
		return ptr;
	}

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	if (bo->domain != DOMAIN_GTT) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		/* XXX use PROT_READ to avoid the write flush? */

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_GTT;
		}
	}

	return ptr;
}
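
/* A hedged usage sketch (not part of the driver): after kgem_bo_map()
 * the pointer may be either a GTT or a CPU mapping, but the domain
 * tracking above means the caller may write through it either way. */
#if 0
static void example_write_through_map(struct kgem *kgem, struct kgem_bo *bo)
{
	uint32_t *pixels = kgem_bo_map(kgem, bo);

	if (pixels)
		pixels[0] = 0xffffffff;	/* first pixel of the surface */
}
#endif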

/* As kgem_bo_map(), but always returns a GTT mapping and performs no
 * domain transition; the caller is responsible for any serialisation
 * against the GPU. */
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->list));

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(bytes(bo) <= kgem->aperture_mappable / 4);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	return ptr;
}

/* Return (and cache) a CPU mmapping of the bo; the caller is expected
 * to call kgem_bo_sync__cpu() before touching the pages. */
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap mmap_arg;

	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
	assert(!bo->purged);
	assert(list_is_empty(&bo->list));
	assert(!bo->scanout);
	assert(bo->proxy == NULL);

	if (IS_CPU_MAP(bo->map))
		return MAP(bo->map);

	if (bo->map)
		kgem_bo_release_map(kgem, bo);

	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));

retry:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	mmap_arg.offset = 0;
	mmap_arg.size = bytes(bo);
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
		printf("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
		       __FUNCTION__, bo->handle, bytes(bo), 0);
		if (__kgem_throttle_retire(kgem, 0))
			goto retry;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry;
		}

		return NULL;
	}

	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));

	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}
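
/* kgem_bo_map__cpu() deliberately performs no domain transition; a
 * hedged sketch of the expected pairing with kgem_bo_sync__cpu()
 * before the CPU touches the pages. */
#if 0
static void *example_begin_cpu_access(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr = kgem_bo_map__cpu(kgem, bo);

	if (ptr)
		kgem_bo_sync__cpu(kgem, bo);	/* wait + set CPU domain */
	return ptr;
}
#endif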

/* Submit any batch still referencing the bo, then move it into the CPU
 * domain, waiting for the GPU to finish so that subsequent CPU reads
 * and writes through a CPU map are coherent. */
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	kgem_bo_submit(kgem, bo);

	if (bo->domain != DOMAIN_CPU) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: SYNC: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;

		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_CPU;
		}
	}
}

/* Clear the dirty flag on the buffers of the request under
 * construction; the walk relies on dirty bo being kept at the head of
 * the buffer list and stops at the first clean one. */
void kgem_clear_dirty(struct kgem *kgem)
{
	struct list * const buffers = &kgem->next_request->buffers;
	struct kgem_bo *bo;

	list_for_each_entry(bo, buffers, request) {
		if (!bo->dirty)
			break;

		bo->dirty = false;
	}
}

/* Look up the cached surface-state offset for this (bo, format) pair;
 * returns 0 on a cache miss. */
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b && b->offset; b = b->next)
		if (format == b->format)
			return b->offset;

	return 0;
}

/* Cache a surface-state offset for this format, reusing the first
 * empty slot or prepending a freshly allocated one.  A zero offset
 * terminates the list of valid entries, so the slot following an
 * overwritten entry is invalidated. */
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b; b = b->next) {
		if (b->offset)
			continue;

		b->offset = offset;
		b->format = format;

		if (b->next)
			b->next->offset = 0;

		return;
	}

	b = malloc(sizeof(*b));
	if (b) {
		b->next = bo->binding.next;
		b->format = format;
		b->offset = offset;
		bo->binding.next = b;
	}
}
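
/* A hedged sketch (not part of the driver) of the binding cache in
 * use: look up the surface-state offset for a format and fill it in on
 * a miss.  The emit callback is a hypothetical stand-in for the
 * backend that actually constructs the surface state. */
#if 0
static uint32_t example_surface_binding(struct kgem_bo *bo,
					uint32_t format,
					uint16_t (*emit)(struct kgem_bo *))
{
	uint32_t offset = kgem_bo_get_binding(bo, format);

	if (offset == 0) {
		offset = emit(bo);	/* cache miss: build surface state */
		kgem_bo_set_binding(bo, format, offset);
	}
	return offset;
}
#endif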