/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "sna.h"
#include "sna_reg.h"

static inline
int user_free(void *mem)
{
	int val;
	__asm__ __volatile__(
	"int $0x40"
	:"=a"(val)
	:"a"(68),"b"(12),"c"(mem));
	return val;
}
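
/*
 * KolibriOS system calls are issued via int $0x40 with the service number
 * in %eax and its arguments in %ebx, %ecx, ...; the result comes back in
 * %eax. The wrapper above invokes service 68 (process memory management)
 * to return a block previously obtained from the application heap, e.g. a
 * CPU mapping cached in bo->map (see kgem_bo_release_map() below).
 */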

unsigned int cpu_cache_size();

static struct kgem_bo *
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

static struct kgem_bo *
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

#define DBG_NO_HW 0
#define DBG_NO_TILING 1
#define DBG_NO_CACHE 0
#define DBG_NO_CACHE_LEVEL 0
#define DBG_NO_CPU 0
#define DBG_NO_USERPTR 0
#define DBG_NO_LLC 0
#define DBG_NO_SEMAPHORES 0
#define DBG_NO_MADV 1
#define DBG_NO_UPLOAD_CACHE 0
#define DBG_NO_UPLOAD_ACTIVE 0
#define DBG_NO_MAP_UPLOAD 0
#define DBG_NO_RELAXED_FENCING 0
#define DBG_NO_SECURE_BATCHES 0
#define DBG_NO_PINNED_BATCHES 0
#define DBG_NO_FAST_RELOC 0
#define DBG_NO_HANDLE_LUT 0
#define DBG_DUMP 0

#ifndef DEBUG_SYNC
#define DEBUG_SYNC 0
#endif

#define SHOW_BATCH 0

#if 0
#define ASSERT_IDLE(kgem__, handle__) assert(!__kgem_busy(kgem__, handle__))
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__) assert(!(expect__) || !__kgem_busy(kgem__, handle__))
#else
#define ASSERT_IDLE(kgem__, handle__)
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__)
#endif

/* Worst case seems to be 965gm where we cannot write within a cacheline that
 * is simultaneously being read by the GPU, or within the sampler
 * prefetch. In general, the chipsets seem to have a requirement that sampler
 * offsets be aligned to a cacheline (64 bytes).
 */
#define UPLOAD_ALIGNMENT 128

#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)
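/* For example, NUM_PAGES(1) == 1 and NUM_PAGES(PAGE_SIZE + 1) == 2:
 * any partial trailing page still costs a whole page. */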

#define MAX_GTT_VMA_CACHE 512
#define MAX_CPU_VMA_CACHE INT16_MAX
#define MAP_PRESERVE_TIME 10

#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
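
/* A minimal sketch (illustration only, not used by the driver) of how the
 * map tags round-trip: the low two bits of bo->map encode the mapping type
 * (00 GTT, 01 CPU, 11 userptr) and MAP() masks them off again, relying on
 * mappings being at least 4-byte aligned. */
static inline bool __map_tag_example(void *ptr)
{
	void *tagged = MAKE_CPU_MAP(ptr);
	return __MAP_TYPE(tagged) == 1 &&	/* tagged as a CPU map */
	       !IS_USER_MAP(tagged) &&		/* bit 1 only set for userptr */
	       MAP(tagged) == ptr;		/* mask recovers the pointer */
}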
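/* The ring selector is stashed in the low bit of the request pointer,
 * the same tagging trick used for bo->map above. */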
#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))

#define LOCAL_I915_PARAM_HAS_BLT 11
#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING 12
#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA 15
#define LOCAL_I915_PARAM_HAS_SEMAPHORES 20
#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES 23
#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES 24
#define LOCAL_I915_PARAM_HAS_NO_RELOC 25
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT 26

#define LOCAL_I915_EXEC_IS_PINNED (1<<10)
#define LOCAL_I915_EXEC_NO_RELOC (1<<11)
#define LOCAL_I915_EXEC_HANDLE_LUT (1<<12)
struct local_i915_gem_userptr {
	uint64_t user_ptr;
	uint32_t user_size;
	uint32_t flags;
#define I915_USERPTR_READ_ONLY (1<<0)
#define I915_USERPTR_UNSYNCHRONIZED (1<<31)
	uint32_t handle;
};

#define UNCACHED 0
#define SNOOPED 1

struct local_i915_gem_cacheing {
	uint32_t handle;
	uint32_t cacheing;
};

#define LOCAL_IOCTL_I915_GEM_SET_CACHEING SRV_I915_GEM_SET_CACHEING

struct local_fbinfo {
	int width;
	int height;
	int pitch;
	int tiling;
};

struct kgem_buffer {
	struct kgem_bo base;
	void *mem;
	uint32_t used;
	uint32_t need_io : 1;
	uint32_t write : 2;
	uint32_t mmapped : 1;
};

static struct kgem_bo *__kgem_freed_bo;
static struct kgem_request *__kgem_freed_request;
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;

static inline int bytes(struct kgem_bo *bo)
{
	return __kgem_bo_size(bo);
}

#define bucket(B) (B)->size.pages.bucket
#define num_pages(B) (B)->size.pages.count

#ifdef DEBUG_MEMORY
static void debug_alloc(struct kgem *kgem, size_t size)
{
	kgem->debug_memory.bo_allocs++;
	kgem->debug_memory.bo_bytes += size;
}
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
{
	debug_alloc(kgem, bytes(bo));
}
#else
#define debug_alloc(k, b)
#define debug_alloc__bo(k, b)
#endif

static void kgem_sna_reset(struct kgem *kgem)
{
	struct sna *sna = container_of(kgem, struct sna, kgem);

	sna->render.reset(sna);
	sna->blt_state.fill_bo = 0;
}

static void kgem_sna_flush(struct kgem *kgem)
{
	struct sna *sna = container_of(kgem, struct sna, kgem);

	sna->render.flush(sna);

//	if (sna->render.solid_cache.dirty)
//		sna_render_flush_solid(sna);
}

static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
{
	struct drm_i915_gem_set_tiling set_tiling;
	int ret;

	if (DBG_NO_TILING)
		return false;
/*
	VG_CLEAR(set_tiling);
	do {
		set_tiling.handle = handle;
		set_tiling.tiling_mode = tiling;
		set_tiling.stride = stride;

		ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
*/
	return false;//ret == 0;
}

static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
{
	struct local_i915_gem_cacheing arg;

	VG_CLEAR(arg);
	arg.handle = handle;
	arg.cacheing = cacheing;
	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
}

static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
{
	if (flags & CREATE_NO_RETIRE) {
		DBG(("%s: not retiring per-request\n", __FUNCTION__));
		return false;
	}

	if (!kgem->need_retire) {
		DBG(("%s: nothing to retire\n", __FUNCTION__));
		return false;
	}

	if (kgem_retire(kgem))
		return true;

	if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
		DBG(("%s: not throttling\n", __FUNCTION__));
		return false;
	}

	kgem_throttle(kgem);
	return kgem_retire(kgem);
}

static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap_gtt mmap_arg;
	void *ptr;

	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
	     bo->handle, bytes(bo)));
	assert(bo->proxy == NULL);

retry_gtt:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
		printf("%s: failed to retrieve GTT offset for handle=%d: %d\n",
		       __FUNCTION__, bo->handle, 0);
		(void)__kgem_throttle_retire(kgem, 0);
		if (kgem_expire_cache(kgem))
			goto retry_gtt;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry_gtt;
		}

		return NULL;
	}

retry_mmap:
	ptr = (void*)(int)mmap_arg.offset;
	if (ptr == NULL) {
		printf("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
		       __FUNCTION__, bo->handle, bytes(bo), 0);
	}

	return ptr;
}

static int __gem_write(int fd, uint32_t handle,
		       int offset, int length,
		       const void *src)
{
	struct drm_i915_gem_pwrite pwrite;

	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
	     handle, offset, length));

	VG_CLEAR(pwrite);
	pwrite.handle = handle;
	pwrite.offset = offset;
	pwrite.size = length;
	pwrite.data_ptr = (uintptr_t)src;
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}

static int gem_write(int fd, uint32_t handle,
		     int offset, int length,
		     const void *src)
{
	struct drm_i915_gem_pwrite pwrite;

	DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
	     handle, offset, length));

	VG_CLEAR(pwrite);
	pwrite.handle = handle;
	/* align the transfer to cachelines; fortuitously this is safe! */
	if ((offset | length) & 63) {
		pwrite.offset = offset & ~63;
		pwrite.size = ALIGN(offset+length, 64) - pwrite.offset;
		pwrite.data_ptr = (uintptr_t)src + pwrite.offset - offset;
	} else {
		pwrite.offset = offset;
		pwrite.size = length;
		pwrite.data_ptr = (uintptr_t)src;
	}
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
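
/* An example of gem_write()'s cacheline fixup: writing 100 bytes at offset
 * 10 straddles a cacheline boundary, so the ioctl is widened to offset 0
 * with size = ALIGN(110, 64) = 128, and data_ptr is rewound by the same 10
 * bytes; per the comment above this is safe because we own the whole
 * object. */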
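/* busy.busy is primed with !kgem->wedged: on a wedged GPU a failed ioctl
 * reports idle so cleanup can proceed, while an unserviced query on healthy
 * hardware conservatively reports busy. */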
bool __kgem_busy(struct kgem *kgem, int handle)
{
	struct drm_i915_gem_busy busy;

	VG_CLEAR(busy);
	busy.handle = handle;
	busy.busy = !kgem->wedged;
	(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	DBG(("%s: handle=%d, busy=%d, wedged=%d\n",
	     __FUNCTION__, handle, busy.busy, kgem->wedged));

	return busy.busy;
}

static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
	     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
	     __kgem_busy(kgem, bo->handle)));
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->vma));

	if (bo->rq) {
		if (!__kgem_busy(kgem, bo->handle)) {
			__kgem_bo_clear_busy(bo);
			kgem_retire(kgem);
		}
	} else {
		assert(!bo->needs_flush);
		ASSERT_IDLE(kgem, bo->handle);
	}
}

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length)
{
	assert(bo->refcnt);
	assert(!bo->purged);
	assert(bo->proxy == NULL);
	ASSERT_IDLE(kgem, bo->handle);

	assert(length <= bytes(bo));
	if (gem_write(kgem->fd, bo->handle, 0, length, data))
		return false;

	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
	if (bo->exec == NULL) {
		kgem_bo_retire(kgem, bo);
		bo->domain = DOMAIN_NONE;
	}
	return true;
}

static uint32_t gem_create(int fd, int num_pages)
{
	struct drm_i915_gem_create create;

	VG_CLEAR(create);
	create.handle = 0;
	create.size = PAGE_SIZE * num_pages;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

	return create.handle;
}

static bool
kgem_bo_set_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	assert(bo->exec == NULL);
	assert(!bo->purged);

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_DONTNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
		bo->purged = 1;
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
		return madv.retained;
	}

	return true;
#endif
}

static bool
kgem_bo_is_retained(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	if (!bo->purged)
		return true;

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_DONTNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0)
		return madv.retained;

	return false;
#endif
}

static bool
kgem_bo_clear_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
	return true;
#else
	struct drm_i915_gem_madvise madv;

	assert(bo->purged);

	VG_CLEAR(madv);
	madv.handle = bo->handle;
	madv.madv = I915_MADV_WILLNEED;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
		bo->purged = !madv.retained;
		kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
		return madv.retained;
	}

	return false;
#endif
}

static void gem_close(int fd, uint32_t handle)
{
	struct drm_gem_close close;

	VG_CLEAR(close);
	close.handle = handle;
	(void)drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
}

constant inline static unsigned long __fls(unsigned long word)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
#else
	unsigned int v = 0;

	while (word >>= 1)
		v++;

	return v;
#endif
}

constant inline static int cache_bucket(int num_pages)
{
	return __fls(num_pages);
}
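
/* Buckets are indexed by the most significant bit of the page count,
 * i.e. cache_bucket(1) == 0, cache_bucket(2..3) == 1,
 * cache_bucket(4..7) == 2, so each bucket holds objects within one
 * power-of-two size band. */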
static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
				      int handle, int num_pages)
{
	assert(num_pages);
	memset(bo, 0, sizeof(*bo));

	bo->refcnt = 1;
	bo->handle = handle;
	bo->target_handle = -1;
	num_pages(bo) = num_pages;
	bucket(bo) = cache_bucket(num_pages);
	bo->reusable = true;
	bo->domain = DOMAIN_CPU;
	list_init(&bo->request);
	list_init(&bo->list);
	list_init(&bo->vma);

	return bo;
}

static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
{
	struct kgem_bo *bo;

	if (__kgem_freed_bo) {
		bo = __kgem_freed_bo;
		__kgem_freed_bo = *(struct kgem_bo **)bo;
	} else {
		bo = malloc(sizeof(*bo));
		if (bo == NULL)
			return NULL;
	}

	return __kgem_bo_init(bo, handle, num_pages);
}

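/* Freed bo and request structs are kept on intrusive freelists: the first
 * word of a free object stores the pointer to the next one, so recycling
 * costs no extra allocation. */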
static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
{
	struct kgem_request *rq;

	rq = __kgem_freed_request;
	if (rq) {
		__kgem_freed_request = *(struct kgem_request **)rq;
	} else {
		rq = malloc(sizeof(*rq));
		if (rq == NULL)
			rq = &kgem->static_request;
	}

	list_init(&rq->buffers);
	rq->bo = NULL;
	rq->ring = 0;

	return rq;
}

static void __kgem_request_free(struct kgem_request *rq)
{
	_list_del(&rq->list);
	*(struct kgem_request **)rq = __kgem_freed_request;
	__kgem_freed_request = rq;
}

static struct list *inactive(struct kgem *kgem, int num_pages)
{
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
	return &kgem->inactive[cache_bucket(num_pages)];
}

static struct list *active(struct kgem *kgem, int num_pages, int tiling)
{
	assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
	assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
	return &kgem->active[cache_bucket(num_pages)][tiling];
}

static size_t
agp_aperture_size(struct pci_device *dev, unsigned gen)
{
	/* XXX assume that only future chipsets are unknown and follow
	 * the post gen2 PCI layout.
	 */
//	return dev->regions[gen < 030 ? 0 : 2].size;

	return 0;
}

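/* KolibriOS service 18, subfunction 20 fills the buffer with memory
 * statistics; the value returned in %eax is used here (judging from the
 * function's name and callers) as the total RAM size, with -1 signalling
 * failure. The layout of data[] is not otherwise consumed. */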
static size_t
total_ram_size(void)
{
	uint32_t data[9];
	size_t size = 0;

	asm volatile("int $0x40"
	    : "=a" (size)
	    : "a" (18),"b"(20), "c" (data)
	    : "memory");

	return size != -1 ? size : 0;
}

static int gem_param(struct kgem *kgem, int name)
{
	drm_i915_getparam_t gp;
	int v = -1; /* No param uses the sign bit, reserve it for errors */

	VG_CLEAR(gp);
	gp.param = name;
	gp.value = &v;
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
	return v;
}

static bool test_has_execbuffer2(struct kgem *kgem)
{
	return 1;
}

static bool test_has_no_reloc(struct kgem *kgem)
{
	if (DBG_NO_FAST_RELOC)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
}

static bool test_has_handle_lut(struct kgem *kgem)
{
	if (DBG_NO_HANDLE_LUT)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
}

static bool test_has_semaphores_enabled(struct kgem *kgem)
{
	bool detected = false;
	int ret;

	if (DBG_NO_SEMAPHORES)
		return false;

	ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
	if (ret != -1)
		return ret > 0;

	return detected;
}

static bool __kgem_throttle(struct kgem *kgem)
{
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
		return false;

	return errno == EIO;
}

static bool is_hw_supported(struct kgem *kgem,
			    struct pci_device *dev)
{
	if (DBG_NO_HW)
		return false;

	if (!test_has_execbuffer2(kgem))
		return false;

	if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
		return kgem->has_blt;

	/* Although pre-855gm the GMCH is fubar, it works mostly. So
	 * let the user decide through "NoAccel" whether or not to risk
	 * hw acceleration.
	 */

	if (kgem->gen == 060 && dev->revision < 8) {
		/* pre-production SNB with dysfunctional BLT */
		return false;
	}

	if (kgem->gen >= 060) /* Only if the kernel supports the BLT ring */
		return kgem->has_blt;

	return true;
}

static bool test_has_relaxed_fencing(struct kgem *kgem)
{
	if (kgem->gen < 040) {
		if (DBG_NO_RELAXED_FENCING)
			return false;

		return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0;
	} else
		return true;
}

static bool test_has_llc(struct kgem *kgem)
{
	int has_llc = -1;

	if (DBG_NO_LLC)
		return false;

#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
	has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
#endif
	if (has_llc == -1) {
		DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
		has_llc = kgem->gen >= 060;
	}

	return has_llc;
}

static bool test_has_cacheing(struct kgem *kgem)
{
	uint32_t handle;
	bool ret;

	if (DBG_NO_CACHE_LEVEL)
		return false;

	/* Incoherent blt and sampler hangs the GPU */
	if (kgem->gen == 040)
		return false;

	handle = gem_create(kgem->fd, 1);
	if (handle == 0)
		return false;

	ret = gem_set_cacheing(kgem->fd, handle, UNCACHED);
	gem_close(kgem->fd, handle);
	return ret;
}

static bool test_has_userptr(struct kgem *kgem)
{
#if defined(USE_USERPTR)
	uint32_t handle;
	void *ptr;

	if (DBG_NO_USERPTR)
		return false;

	/* Incoherent blt and sampler hangs the GPU */
	if (kgem->gen == 040)
		return false;

	ptr = malloc(PAGE_SIZE);
	handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
	gem_close(kgem->fd, handle);
	free(ptr);

	return handle != 0;
#else
	return false;
#endif
}

static bool test_has_secure_batches(struct kgem *kgem)
{
	if (DBG_NO_SECURE_BATCHES)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
}

static bool test_has_pinned_batches(struct kgem *kgem)
{
	if (DBG_NO_PINNED_BATCHES)
		return false;

	return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
}

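/* Pre-pin a small pool of batch buffers: count[]/size[] below request two
 * 1-page batches and one 2-page batch, giving the driver batches at stable
 * GTT offsets on hardware that cannot relocate them (see the gen == 020
 * handling in kgem_init() below). */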
static bool kgem_init_pinned_batches(struct kgem *kgem)
{
	int count[2] = { 2, 1 };
	int size[2] = { 1, 2 };
	int n, i;

	if (kgem->wedged)
		return true;

	for (n = 0; n < ARRAY_SIZE(count); n++) {
		for (i = 0; i < count[n]; i++) {
			struct drm_i915_gem_pin pin;
			struct kgem_bo *bo;

			VG_CLEAR(pin);

			pin.handle = gem_create(kgem->fd, size[n]);
			if (pin.handle == 0)
				goto err;

			DBG(("%s: new handle=%d, num_pages=%d\n",
			     __FUNCTION__, pin.handle, size[n]));

			bo = __kgem_bo_alloc(pin.handle, size[n]);
			if (bo == NULL) {
				gem_close(kgem->fd, pin.handle);
				goto err;
			}

			pin.alignment = 0;
			if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
				gem_close(kgem->fd, pin.handle);
				goto err;
			}
			bo->presumed_offset = pin.offset;
			debug_alloc__bo(kgem, bo);
			list_add(&bo->list, &kgem->pinned_batches[n]);
		}
	}

	return true;

err:
	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
		while (!list_is_empty(&kgem->pinned_batches[n])) {
			kgem_bo_destroy(kgem,
					list_first_entry(&kgem->pinned_batches[n],
							 struct kgem_bo, list));
		}
	}

	/* For simplicity populate the lists with a single unpinned bo */
	for (n = 0; n < ARRAY_SIZE(count); n++) {
		struct kgem_bo *bo;
		uint32_t handle;

		handle = gem_create(kgem->fd, size[n]);
		if (handle == 0)
			break;

		bo = __kgem_bo_alloc(handle, size[n]);
		if (bo == NULL) {
			gem_close(kgem->fd, handle);
			break;
		}

		debug_alloc__bo(kgem, bo);
		list_add(&bo->list, &kgem->pinned_batches[n]);
	}
	return false;
}

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{
	struct drm_i915_gem_get_aperture aperture;
	size_t totalram;
	unsigned half_gpu_max;
	unsigned int i, j;

	DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));

	memset(kgem, 0, sizeof(*kgem));

	kgem->fd = fd;
	kgem->gen = gen;

	list_init(&kgem->requests[0]);
	list_init(&kgem->requests[1]);
	list_init(&kgem->batch_buffers);
	list_init(&kgem->active_buffers);
	list_init(&kgem->flushing);
	list_init(&kgem->large);
	list_init(&kgem->large_inactive);
	list_init(&kgem->snoop);
	list_init(&kgem->scanout);
	for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++)
		list_init(&kgem->pinned_batches[i]);
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
		list_init(&kgem->inactive[i]);
	for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
		for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++)
			list_init(&kgem->active[i][j]);
	}
	for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) {
		for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
			list_init(&kgem->vma[i].inactive[j]);
	}
	kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
	kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;

	kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
	DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
	     kgem->has_blt));

	kgem->has_relaxed_delta =
		gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
	     kgem->has_relaxed_delta));

	kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
	     kgem->has_relaxed_fencing));

	kgem->has_llc = test_has_llc(kgem);
	DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
	     kgem->has_llc));

	kgem->has_cacheing = test_has_cacheing(kgem);
	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
	     kgem->has_cacheing));

	kgem->has_userptr = test_has_userptr(kgem);
	DBG(("%s: has userptr? %d\n", __FUNCTION__,
	     kgem->has_userptr));

	kgem->has_no_reloc = test_has_no_reloc(kgem);
	DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
	     kgem->has_no_reloc));

	kgem->has_handle_lut = test_has_handle_lut(kgem);
	DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
	     kgem->has_handle_lut));

	kgem->has_semaphores = false;
	if (kgem->has_blt && test_has_semaphores_enabled(kgem))
		kgem->has_semaphores = true;
	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
	     kgem->has_semaphores));

	kgem->can_blt_cpu = gen >= 030;
	DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
	     kgem->can_blt_cpu));

	kgem->has_secure_batches = test_has_secure_batches(kgem);
	DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
	     kgem->has_secure_batches));

	kgem->has_pinned_batches = test_has_pinned_batches(kgem);
	DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
	     kgem->has_pinned_batches));

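	/* Generations are encoded in octal throughout: 020 is gen2, 030
	 * gen3, 040 gen4, 060 gen6 (SNB) and 070 gen7 (IVB), so
	 * (gen >> 3) below extracts the major generation number. */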
	if (!is_hw_supported(kgem, dev)) {
		printf("Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
		kgem->wedged = 1;
	} else if (__kgem_throttle(kgem)) {
		printf("Detected a hung GPU, disabling acceleration.\n");
		kgem->wedged = 1;
	}

	kgem->batch_size = ARRAY_SIZE(kgem->batch);
	if (gen == 020 && !kgem->has_pinned_batches)
		/* Limited to what we can pin */
		kgem->batch_size = 4*1024;
	if (gen == 022)
		/* 865g cannot handle a batch spanning multiple pages */
		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
	if ((gen >> 3) == 7)
		kgem->batch_size = 16*1024;
	if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
		kgem->batch_size = 4*1024;

	if (!kgem_init_pinned_batches(kgem) && gen == 020) {
		printf("Unable to reserve memory for GPU, disabling acceleration.\n");
		kgem->wedged = 1;
	}

	DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
	     kgem->batch_size));

	kgem->min_alignment = 16;
	if (gen < 040)
		kgem->min_alignment = 64;

	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
	DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
	     kgem->half_cpu_cache_pages));

	kgem->next_request = __kgem_request_alloc(kgem);

	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
	     !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
	     kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));

	VG_CLEAR(aperture);
	aperture.aper_size = 0;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
	if (aperture.aper_size == 0)
		aperture.aper_size = 64*1024*1024;

	DBG(("%s: aperture size %lld, available now %lld\n",
	     __FUNCTION__,
	     (long long)aperture.aper_size,
	     (long long)aperture.aper_available_size));

	kgem->aperture_total = aperture.aper_size;
	kgem->aperture_high = aperture.aper_size * 3/4;
	kgem->aperture_low = aperture.aper_size * 1/3;
	if (gen < 033) {
		/* Severe alignment penalties */
		kgem->aperture_high /= 2;
		kgem->aperture_low /= 2;
	}
	DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
	     kgem->aperture_low, kgem->aperture_low / (1024*1024),
	     kgem->aperture_high, kgem->aperture_high / (1024*1024)));

	kgem->aperture_mappable = agp_aperture_size(dev, gen);
	if (kgem->aperture_mappable == 0 ||
	    kgem->aperture_mappable > aperture.aper_size)
		kgem->aperture_mappable = aperture.aper_size;
	DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
	     kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));

	kgem->buffer_size = 64 * 1024;
	while (kgem->buffer_size < kgem->aperture_mappable >> 10)
		kgem->buffer_size *= 2;
	if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
		kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
	DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
	     kgem->buffer_size, kgem->buffer_size / 1024));

	kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
	kgem->max_gpu_size = kgem->max_object_size;
	if (!kgem->has_llc)
		kgem->max_gpu_size = MAX_CACHE_SIZE;

	totalram = total_ram_size();
	if (totalram == 0) {
		DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
		     __FUNCTION__));
		totalram = kgem->aperture_total;
	}
	DBG(("%s: total ram=%u\n", __FUNCTION__, totalram));
	if (kgem->max_object_size > totalram / 2)
		kgem->max_object_size = totalram / 2;
	if (kgem->max_gpu_size > totalram / 4)
		kgem->max_gpu_size = totalram / 4;

	kgem->max_cpu_size = kgem->max_object_size;

	half_gpu_max = kgem->max_gpu_size / 2;
	kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
	if (kgem->max_copy_tile_size > half_gpu_max)
		kgem->max_copy_tile_size = half_gpu_max;

	if (kgem->has_llc)
		kgem->max_upload_tile_size = kgem->max_copy_tile_size;
	else
		kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
	if (kgem->max_upload_tile_size > half_gpu_max)
		kgem->max_upload_tile_size = half_gpu_max;

	kgem->large_object_size = MAX_CACHE_SIZE;
	if (kgem->large_object_size > kgem->max_gpu_size)
		kgem->large_object_size = kgem->max_gpu_size;

	if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
		if (kgem->large_object_size > kgem->max_cpu_size)
			kgem->large_object_size = kgem->max_cpu_size;
	} else
		kgem->max_cpu_size = 0;
	if (DBG_NO_CPU)
		kgem->max_cpu_size = 0;

	DBG(("%s: maximum object size=%d\n",
	     __FUNCTION__, kgem->max_object_size));
	DBG(("%s: large object threshold=%d\n",
	     __FUNCTION__, kgem->large_object_size));
	DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
	     __FUNCTION__,
	     kgem->max_gpu_size, kgem->max_cpu_size,
	     kgem->max_upload_tile_size, kgem->max_copy_tile_size));

	/* Convert the aperture thresholds to pages */
	kgem->aperture_low /= PAGE_SIZE;
	kgem->aperture_high /= PAGE_SIZE;

	kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
	if ((int)kgem->fence_max < 0)
		kgem->fence_max = 5; /* minimum safe value for all hw */
	DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));

	kgem->batch_flags_base = 0;
	if (kgem->has_no_reloc)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
	if (kgem->has_handle_lut)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
	if (kgem->has_pinned_batches)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;
}

/* XXX hopefully a good approximation */
static uint32_t kgem_get_unique_id(struct kgem *kgem)
{
	uint32_t id;
	id = ++kgem->unique_id;
	if (id == 0)
		id = ++kgem->unique_id;
	return id;
}

inline static uint32_t kgem_pitch_alignment(struct kgem *kgem, unsigned flags)
{
	if (flags & CREATE_PRIME)
		return 256;
	if (flags & CREATE_SCANOUT)
		return 64;
	return kgem->min_alignment;
}

static uint32_t kgem_untiled_pitch(struct kgem *kgem,
				   uint32_t width, uint32_t bpp,
				   unsigned flags)
{
	width = ALIGN(width, 2) * bpp >> 3;
	return ALIGN(width, kgem_pitch_alignment(kgem, flags));
}

uint32_t kgem_surface_size(struct kgem *kgem,
			   bool relaxed_fencing,
			   unsigned flags,
			   uint32_t width,
			   uint32_t height,
			   uint32_t bpp,
			   uint32_t tiling,
			   uint32_t *pitch)
{
	uint32_t tile_width, tile_height;
	uint32_t size;

	assert(width <= MAXSHORT);
	assert(height <= MAXSHORT);

	if (kgem->gen <= 030) {
		if (tiling) {
			if (kgem->gen < 030) {
				tile_width = 128;
				tile_height = 32;
			} else {
				tile_width = 512;
				tile_height = 16;
			}
		} else {
			tile_width = 2 * bpp >> 3;
			tile_width = ALIGN(tile_width,
					   kgem_pitch_alignment(kgem, flags));
			tile_height = 2;
		}
	} else switch (tiling) {
	default:
	case I915_TILING_NONE:
		tile_width = 2 * bpp >> 3;
		tile_width = ALIGN(tile_width,
				   kgem_pitch_alignment(kgem, flags));
		tile_height = 2;
		break;

		/* XXX align to an even tile row */
	case I915_TILING_X:
		tile_width = 512;
		tile_height = 16;
		break;
	case I915_TILING_Y:
		tile_width = 128;
		tile_height = 64;
		break;
	}

	*pitch = ALIGN(width * bpp / 8, tile_width);
	height = ALIGN(height, tile_height);
	if (kgem->gen >= 040)
		return PAGE_ALIGN(*pitch * height);

	/* If it is too wide for the blitter, don't even bother. */
	if (tiling != I915_TILING_NONE) {
		if (*pitch > 8192)
			return 0;

		for (size = tile_width; size < *pitch; size <<= 1)
			;
		*pitch = size;
	} else {
		if (*pitch >= 32768)
			return 0;
	}

	size = *pitch * height;
	if (relaxed_fencing || tiling == I915_TILING_NONE)
		return PAGE_ALIGN(size);

	/* We need to allocate a pot fence region for a tiled buffer. */
	if (kgem->gen < 030)
		tile_width = 512 * 1024;
	else
		tile_width = 1024 * 1024;
	while (tile_width < size)
		tile_width *= 2;
	return tile_width;
}

static uint32_t kgem_aligned_height(struct kgem *kgem,
				    uint32_t height, uint32_t tiling)
{
	uint32_t tile_height;

	if (kgem->gen <= 030) {
		tile_height = tiling ? kgem->gen < 030 ? 32 : 16 : 1;
	} else switch (tiling) {
		/* XXX align to an even tile row */
	default:
	case I915_TILING_NONE:
		tile_height = 1;
		break;
	case I915_TILING_X:
		tile_height = 16;
		break;
	case I915_TILING_Y:
		tile_height = 64;
		break;
	}

	return ALIGN(height, tile_height);
}

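/* A worked example for kgem_surface_size() above: a 1024x768, 32bpp,
 * X-tiled surface on gen4+ yields *pitch = ALIGN(1024*4, 512) = 4096 and
 * height = ALIGN(768, 16) = 768, so the object size is
 * PAGE_ALIGN(4096 * 768) = 3 MiB exactly. */
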
static struct drm_i915_gem_exec_object2 *
kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_exec_object2 *exec;

	DBG(("%s: handle=%d, index=%d\n",
	     __FUNCTION__, bo->handle, kgem->nexec));

	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
	bo->target_handle = kgem->has_handle_lut ? kgem->nexec : bo->handle;
	exec = memset(&kgem->exec[kgem->nexec++], 0, sizeof(*exec));
	exec->handle = bo->handle;
	exec->offset = bo->presumed_offset;

	kgem->aperture += num_pages(bo);

	return exec;
}

static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
	bo->exec = kgem_add_handle(kgem, bo);
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);

	list_move_tail(&bo->request, &kgem->next_request->buffers);

	/* XXX is it worth working around gcc here? */
	kgem->flush |= bo->flush;
}

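/* The hardware requires the batch to end on a QWord (two dword) boundary,
 * hence the MI_NOOP padding below when nbatch is odd. */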
static uint32_t kgem_end_batch(struct kgem *kgem)
{
	kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
	if (kgem->nbatch & 1)
		kgem->batch[kgem->nbatch++] = MI_NOOP;

	return kgem->nbatch;
}

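/* Self-relocations are batch words that point back into the batch buffer
 * itself. Only the first 256 are recorded individually in reloc__self[];
 * if that table overflows, the tail of the reloc list is rescanned for the
 * ~0U target-handle sentinel instead. */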
static void kgem_fixup_self_relocs(struct kgem *kgem, struct kgem_bo *bo)
{
	int n;

	if (kgem->nreloc__self == 0)
		return;

	for (n = 0; n < kgem->nreloc__self; n++) {
		int i = kgem->reloc__self[n];
		assert(kgem->reloc[i].target_handle == ~0U);
		kgem->reloc[i].target_handle = bo->target_handle;
		kgem->reloc[i].presumed_offset = bo->presumed_offset;
		kgem->batch[kgem->reloc[i].offset/sizeof(kgem->batch[0])] =
			kgem->reloc[i].delta + bo->presumed_offset;
	}

	if (n == 256) {
		for (n = kgem->reloc__self[255]; n < kgem->nreloc; n++) {
			if (kgem->reloc[n].target_handle == ~0U) {
				kgem->reloc[n].target_handle = bo->target_handle;
				kgem->reloc[n].presumed_offset = bo->presumed_offset;
				kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
					kgem->reloc[n].delta + bo->presumed_offset;
			}
		}
	}
}

static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo)
{
	struct kgem_bo_binding *b;

	b = bo->binding.next;
	while (b) {
		struct kgem_bo_binding *next = b->next;
		free(b);
		b = next;
	}
}

static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
{
	int type = IS_CPU_MAP(bo->map);

	assert(!IS_USER_MAP(bo->map));

	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
	     __FUNCTION__, type ? "CPU" : "GTT",
	     bo->handle, kgem->vma[type].count));

	VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
	user_free(MAP(bo->map));
	bo->map = NULL;

	if (!list_is_empty(&bo->vma)) {
		list_del(&bo->vma);
		kgem->vma[type].count--;
	}
}

static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));

	assert(bo->refcnt == 0);
	assert(bo->exec == NULL);
	assert(!bo->snoop || bo->rq == NULL);

#ifdef DEBUG_MEMORY
	kgem->debug_memory.bo_allocs--;
	kgem->debug_memory.bo_bytes -= bytes(bo);
#endif

	kgem_bo_binding_free(kgem, bo);

	if (IS_USER_MAP(bo->map)) {
		assert(bo->rq == NULL);
		assert(MAP(bo->map) != bo || bo->io);
		if (bo != MAP(bo->map)) {
			DBG(("%s: freeing snooped base\n", __FUNCTION__));
			free(MAP(bo->map));
		}
		bo->map = NULL;
	}
	if (bo->map)
		kgem_bo_release_map(kgem, bo);
	assert(list_is_empty(&bo->vma));

	_list_del(&bo->list);
	_list_del(&bo->request);
	gem_close(kgem->fd, bo->handle);

	if (!bo->io) {
		*(struct kgem_bo **)bo = __kgem_freed_bo;
		__kgem_freed_bo = bo;
	} else
		free(bo);
}

inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
					    struct kgem_bo *bo)
{
	DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle));

	assert(bo->refcnt == 0);
	assert(bo->reusable);
	assert(bo->rq == NULL);
	assert(bo->exec == NULL);
	assert(bo->domain != DOMAIN_GPU);
	assert(!bo->proxy);
	assert(!bo->io);
	assert(!bo->scanout);
	assert(!bo->needs_flush);
	assert(list_is_empty(&bo->vma));
	ASSERT_IDLE(kgem, bo->handle);

	kgem->need_expire = true;

	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
		list_move(&bo->list, &kgem->large_inactive);
		return;
	}

	assert(bo->flush == false);
	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
	if (bo->map) {
		int type = IS_CPU_MAP(bo->map);
		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
		    (!type && !__kgem_bo_is_mappable(kgem, bo))) {
//			munmap(MAP(bo->map), bytes(bo));
			bo->map = NULL;
		}
		if (bo->map) {
			list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
			kgem->vma[type].count++;
		}
	}
}

static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
{
	struct kgem_bo *base;

	if (!bo->io)
		return bo;

	assert(!bo->snoop);
	base = malloc(sizeof(*base));
	if (base) {
		DBG(("%s: transferring io handle=%d to bo\n",
		     __FUNCTION__, bo->handle));
		/* transfer the handle to a minimum bo */
		memcpy(base, bo, sizeof(*base));
		base->io = false;
		list_init(&base->list);
		list_replace(&bo->request, &base->request);
		list_replace(&bo->vma, &base->vma);
		free(bo);
		bo = base;
	} else
		bo->reusable = false;

	return bo;
}

inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
						struct kgem_bo *bo)
{
	DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));

	list_del(&bo->list);
	assert(bo->rq == NULL);
	assert(bo->exec == NULL);
	if (bo->map) {
		assert(!list_is_empty(&bo->vma));
		list_del(&bo->vma);
		kgem->vma[IS_CPU_MAP(bo->map)].count--;
	}
}

inline static void kgem_bo_remove_from_active(struct kgem *kgem,
					      struct kgem_bo *bo)
{
	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));

	list_del(&bo->list);
	assert(bo->rq != NULL);
	if (bo->rq == (void *)kgem)
		list_del(&bo->request);
	assert(list_is_empty(&bo->vma));
}

static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->scanout);
	assert(!bo->refcnt);
	assert(bo->exec == NULL);
	assert(bo->proxy == NULL);

	DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
	     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
	if (bo->delta) {
		/* XXX will leak if we are not DRM_MASTER. *shrug* */
//		drmModeRmFB(kgem->fd, bo->delta);
		bo->delta = 0;
	}

	bo->scanout = false;
	bo->flush = false;
	bo->reusable = true;

	if (kgem->has_llc &&
	    !gem_set_cacheing(kgem->fd, bo->handle, SNOOPED))
		bo->reusable = false;
}

static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
{
	struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;

	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));

	if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used)
		io->used = bo->delta;
}

static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt == 0);
	assert(bo->scanout);
	assert(bo->delta);
	assert(!bo->snoop);
	assert(!bo->io);

	DBG(("%s: moving %d [fb %d] to scanout cache, active? %d\n",
	     __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL));
	if (bo->rq)
		list_move_tail(&bo->list, &kgem->scanout);
	else
		list_move(&bo->list, &kgem->scanout);
}

static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt == 0);
	assert(bo->exec == NULL);

	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
		     __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13));
		kgem_bo_free(kgem, bo);
		return;
	}

	assert(bo->tiling == I915_TILING_NONE);
	assert(bo->rq == NULL);

	DBG(("%s: moving %d to snoop cache\n", __FUNCTION__, bo->handle));
	list_add(&bo->list, &kgem->snoop);
}

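/* The snoop cache is searched best-fit: the first bo that is large enough
 * but no more than twice the requested size is taken outright, while the
 * first oversized candidate is remembered as a fallback. */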
static struct kgem_bo *
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
{
	struct kgem_bo *bo, *first = NULL;

	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));

	if ((kgem->has_cacheing | kgem->has_userptr) == 0)
		return NULL;

	if (list_is_empty(&kgem->snoop)) {
		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
		if (!__kgem_throttle_retire(kgem, flags)) {
			DBG(("%s: nothing retired\n", __FUNCTION__));
			return NULL;
		}
	}

	list_for_each_entry(bo, &kgem->snoop, list) {
		assert(bo->refcnt == 0);
		assert(bo->snoop);
		assert(!bo->scanout);
		assert(bo->proxy == NULL);
		assert(bo->tiling == I915_TILING_NONE);
		assert(bo->rq == NULL);
		assert(bo->exec == NULL);

		if (num_pages > num_pages(bo))
			continue;

		if (num_pages(bo) > 2*num_pages) {
			if (first == NULL)
				first = bo;
			continue;
		}

		list_del(&bo->list);
		bo->pitch = 0;
		bo->delta = 0;

		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
		     __FUNCTION__, bo->handle, num_pages(bo)));
		return bo;
	}

	if (first) {
		list_del(&first->list);
		first->pitch = 0;
		first->delta = 0;

		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
		     __FUNCTION__, first->handle, num_pages(first)));
		return first;
	}

	return NULL;
}

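/* Dropping the last reference does not necessarily free a bo: it cascades
 * through the snoop, scanout, active and inactive caches in turn, and only
 * objects that fit none of them (or that cannot be marked purgeable) are
 * actually closed. */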
static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
    DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));

    assert(list_is_empty(&bo->list));
    assert(bo->refcnt == 0);
    assert(!bo->purged);
    assert(bo->proxy == NULL);

    bo->binding.offset = 0;

    if (DBG_NO_CACHE)
        goto destroy;

    if (bo->snoop && !bo->flush) {
        DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
        assert(!bo->flush);
        assert(list_is_empty(&bo->list));
        if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle))
            __kgem_bo_clear_busy(bo);
        if (bo->rq == NULL) {
            assert(!bo->needs_flush);
            kgem_bo_move_to_snoop(kgem, bo);
        }
        return;
    }

    if (bo->scanout) {
        kgem_bo_move_to_scanout(kgem, bo);
        return;
    }

    if (bo->io)
        bo = kgem_bo_replace_io(bo);
    if (!bo->reusable) {
        DBG(("%s: handle=%d, not reusable\n",
             __FUNCTION__, bo->handle));
        goto destroy;
    }

    if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU)
        kgem_bo_release_map(kgem, bo);

    assert(list_is_empty(&bo->vma));
    assert(list_is_empty(&bo->list));
    assert(bo->snoop == false);
    assert(bo->io == false);
    assert(bo->scanout == false);

    if (bo->exec && kgem->nexec == 1) {
        DBG(("%s: only handle in batch, discarding last operations\n",
             __FUNCTION__));
        assert(bo->exec == &kgem->exec[0]);
        assert(kgem->exec[0].handle == bo->handle);
        assert(RQ(bo->rq) == kgem->next_request);
        bo->refcnt = 1;
        kgem_reset(kgem);
        bo->refcnt = 0;
    }

    if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
        __kgem_bo_clear_busy(bo);

    if (bo->rq) {
        struct list *cache;

        DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
        if (bucket(bo) < NUM_CACHE_BUCKETS)
            cache = &kgem->active[bucket(bo)][bo->tiling];
        else
            cache = &kgem->large;
        list_add(&bo->list, cache);
        return;
    }

    assert(bo->exec == NULL);
    assert(list_is_empty(&bo->request));

    if (!IS_CPU_MAP(bo->map)) {
        if (!kgem_bo_set_purgeable(kgem, bo))
            goto destroy;

        if (!kgem->has_llc && bo->domain == DOMAIN_CPU)
            goto destroy;

        DBG(("%s: handle=%d, purged\n",
             __FUNCTION__, bo->handle));
    }

    kgem_bo_move_to_inactive(kgem, bo);
    return;

destroy:
    if (!bo->exec)
        kgem_bo_free(kgem, bo);
}

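/*
 * In summary, __kgem_bo_destroy() routes an unreferenced bo to one of
 * four destinations: the snoop cache (snooped and idle), the scanout
 * cache, an active bucket (still owned by a request), or, if it can be
 * marked purgeable, the inactive cache; everything else is freed
 * immediately.
 */
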
static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
{
    assert(bo->refcnt);
    if (--bo->refcnt == 0)
        __kgem_bo_destroy(kgem, bo);
}

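/*
 * A hedged usage sketch (not part of the driver): external callers pair
 * kgem_bo_reference() with kgem_bo_destroy(); the final unreference
 * funnels into __kgem_bo_destroy() above, which decides whether the bo
 * is cached or freed.
 */
#if 0
static void example_bo_lifetime(struct kgem *kgem)
{
    struct kgem_bo *bo = kgem_create_linear(kgem, 4096, CREATE_NO_THROTTLE);
    if (bo == NULL)
        return;
    /* ... emit commands referencing bo ... */
    kgem_bo_destroy(kgem, bo); /* may cache the bo rather than free it */
}
#endif
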
static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
{
    while (!list_is_empty(&bo->base.vma)) {
        struct kgem_bo *cached;

        cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
        assert(cached->proxy == &bo->base);
        list_del(&cached->vma);

        assert(*(struct kgem_bo **)cached->map == cached);
        *(struct kgem_bo **)cached->map = NULL;
        cached->map = NULL;

        kgem_bo_destroy(kgem, cached);
    }
}

static bool kgem_retire__buffers(struct kgem *kgem)
{
    bool retired = false;

    while (!list_is_empty(&kgem->active_buffers)) {
        struct kgem_buffer *bo =
            list_last_entry(&kgem->active_buffers,
                            struct kgem_buffer,
                            base.list);

        if (bo->base.rq)
            break;

        DBG(("%s: releasing upload cache for handle=%d? %d\n",
             __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
        list_del(&bo->base.list);
        kgem_buffer_release(kgem, bo);
        kgem_bo_unref(kgem, &bo->base);
        retired = true;
    }

    return retired;
}

static bool kgem_retire__flushing(struct kgem *kgem)
{
    struct kgem_bo *bo, *next;
    bool retired = false;

    list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
        assert(bo->rq == (void *)kgem);
        assert(bo->exec == NULL);

        if (__kgem_busy(kgem, bo->handle))
            break;

        __kgem_bo_clear_busy(bo);

        if (bo->refcnt)
            continue;

        if (bo->snoop) {
            kgem_bo_move_to_snoop(kgem, bo);
        } else if (bo->scanout) {
            kgem_bo_move_to_scanout(kgem, bo);
        } else if ((bo = kgem_bo_replace_io(bo))->reusable &&
                   kgem_bo_set_purgeable(kgem, bo)) {
            kgem_bo_move_to_inactive(kgem, bo);
            retired = true;
        } else
            kgem_bo_free(kgem, bo);
    }
#if HAS_DEBUG_FULL
    {
        int count = 0;
        list_for_each_entry(bo, &kgem->flushing, request)
            count++;
        printf("%s: %d bo on flushing list\n", __FUNCTION__, count);
    }
#endif

    kgem->need_retire |= !list_is_empty(&kgem->flushing);

    return retired;
}

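/*
 * The flushing list is kept in submission order, so the walk above can
 * stop at the first bo that is still busy; everything after it was
 * submitted later and must still be busy as well.
 */
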
static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
{
    bool retired = false;

    DBG(("%s: request %d complete\n",
         __FUNCTION__, rq->bo->handle));

    while (!list_is_empty(&rq->buffers)) {
        struct kgem_bo *bo;

        bo = list_first_entry(&rq->buffers,
                              struct kgem_bo,
                              request);

        assert(RQ(bo->rq) == rq);
        assert(bo->exec == NULL);
        assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);

        list_del(&bo->request);

        if (bo->needs_flush)
            bo->needs_flush = __kgem_busy(kgem, bo->handle);
        if (bo->needs_flush) {
            DBG(("%s: moving %d to flushing\n",
                 __FUNCTION__, bo->handle));
            list_add(&bo->request, &kgem->flushing);
            bo->rq = (void *)kgem;
            continue;
        }

        bo->domain = DOMAIN_NONE;
        bo->rq = NULL;
        if (bo->refcnt)
            continue;

        if (bo->snoop) {
            kgem_bo_move_to_snoop(kgem, bo);
        } else if (bo->scanout) {
            kgem_bo_move_to_scanout(kgem, bo);
        } else if ((bo = kgem_bo_replace_io(bo))->reusable &&
                   kgem_bo_set_purgeable(kgem, bo)) {
            kgem_bo_move_to_inactive(kgem, bo);
            retired = true;
        } else {
            DBG(("%s: closing %d\n",
                 __FUNCTION__, bo->handle));
            kgem_bo_free(kgem, bo);
        }
    }

    assert(rq->bo->rq == NULL);
    assert(list_is_empty(&rq->bo->request));

    if (--rq->bo->refcnt == 0) {
        if (kgem_bo_set_purgeable(kgem, rq->bo)) {
            kgem_bo_move_to_inactive(kgem, rq->bo);
            retired = true;
        } else {
            DBG(("%s: closing %d\n",
                 __FUNCTION__, rq->bo->handle));
            kgem_bo_free(kgem, rq->bo);
        }
    }

    __kgem_request_free(rq);
    return retired;
}

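/*
 * Buffers that still need a flush outlive their request: they are parked
 * on kgem->flushing with the sentinel bo->rq = (void *)kgem, which keeps
 * them marked busy without belonging to any real request. RQ() appears
 * to strip the ring tag that MAKE_REQUEST() packs into the pointer
 * before such comparisons.
 */
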
static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
{
    bool retired = false;

    while (!list_is_empty(&kgem->requests[ring])) {
        struct kgem_request *rq;

        rq = list_first_entry(&kgem->requests[ring],
                              struct kgem_request,
                              list);
        if (__kgem_busy(kgem, rq->bo->handle))
            break;

        retired |= __kgem_retire_rq(kgem, rq);
    }

#if HAS_DEBUG_FULL
    {
        struct kgem_bo *bo;
        int count = 0;

        list_for_each_entry(bo, &kgem->requests[ring], request)
            count++;

        bo = NULL;
        if (!list_is_empty(&kgem->requests[ring]))
            bo = list_first_entry(&kgem->requests[ring],
                                  struct kgem_request,
                                  list)->bo;

        printf("%s: ring=%d, %d outstanding requests, oldest=%d\n",
               __FUNCTION__, ring, count, bo ? bo->handle : 0);
    }
#endif

    return retired;
}

static bool kgem_retire__requests(struct kgem *kgem)
{
    bool retired = false;
    int n;

    for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
        retired |= kgem_retire__requests_ring(kgem, n);
        kgem->need_retire |= !list_is_empty(&kgem->requests[n]);
    }

    return retired;
}

bool kgem_retire(struct kgem *kgem)
{
    bool retired = false;

    DBG(("%s\n", __FUNCTION__));

    kgem->need_retire = false;

    retired |= kgem_retire__flushing(kgem);
    retired |= kgem_retire__requests(kgem);
    retired |= kgem_retire__buffers(kgem);

    DBG(("%s -- retired=%d, need_retire=%d\n",
         __FUNCTION__, retired, kgem->need_retire));

    kgem->retire(kgem);

    return retired;
}

bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
{
    struct kgem_request *rq;

    assert(!list_is_empty(&kgem->requests[ring]));

    rq = list_last_entry(&kgem->requests[ring],
                         struct kgem_request, list);
    if (__kgem_busy(kgem, rq->bo->handle)) {
        DBG(("%s: last request handle=%d still busy\n",
             __FUNCTION__, rq->bo->handle));
        return false;
    }

    DBG(("%s: ring=%d idle (handle=%d)\n",
         __FUNCTION__, ring, rq->bo->handle));

    kgem_retire__requests_ring(kgem, ring);
    assert(list_is_empty(&kgem->requests[ring]));
    return true;
}

static void kgem_commit(struct kgem *kgem)
{
    struct kgem_request *rq = kgem->next_request;
    struct kgem_bo *bo, *next;

    list_for_each_entry_safe(bo, next, &rq->buffers, request) {
        assert(next->request.prev == &bo->request);

        DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n",
             __FUNCTION__, bo->handle, bo->proxy != NULL,
             bo->dirty, bo->needs_flush, bo->snoop,
             (unsigned)bo->exec->offset));

        assert(!bo->purged);
        assert(bo->exec);
        assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec);
        assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));

        bo->presumed_offset = bo->exec->offset;
        bo->exec = NULL;
        bo->target_handle = -1;

        if (!bo->refcnt && !bo->reusable) {
            assert(!bo->snoop);
            kgem_bo_free(kgem, bo);
            continue;
        }

        bo->binding.offset = 0;
        bo->domain = DOMAIN_GPU;
        bo->dirty = false;

        if (bo->proxy) {
            /* proxies are not used for domain tracking */
            bo->exec = NULL;
            __kgem_bo_clear_busy(bo);
        }

        kgem->scanout_busy |= bo->scanout;
    }

    if (rq == &kgem->static_request) {
        struct drm_i915_gem_set_domain set_domain;

        DBG(("%s: syncing due to allocation failure\n", __FUNCTION__));

        VG_CLEAR(set_domain);
        set_domain.handle = rq->bo->handle;
        set_domain.read_domains = I915_GEM_DOMAIN_GTT;
        set_domain.write_domain = I915_GEM_DOMAIN_GTT;
        if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
            DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
            kgem_throttle(kgem);
        }

        kgem_retire(kgem);
        assert(list_is_empty(&rq->buffers));

        gem_close(kgem->fd, rq->bo->handle);
        kgem_cleanup_cache(kgem);
    } else {
        list_add_tail(&rq->list, &kgem->requests[rq->ring]);
        kgem->need_throttle = kgem->need_retire = 1;
    }

    kgem->next_request = NULL;
}

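/*
 * kgem->static_request is the emergency request used when allocation
 * fails; since it cannot be tracked asynchronously, kgem_commit() blocks
 * on it via DRM_IOCTL_I915_GEM_SET_DOMAIN (a set-domain on the batch
 * waits for the GPU to finish with it) and then drops the whole cache.
 */
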
static void kgem_close_list(struct kgem *kgem, struct list *head)
{
    while (!list_is_empty(head))
        kgem_bo_free(kgem, list_first_entry(head, struct kgem_bo, list));
}

static void kgem_close_inactive(struct kgem *kgem)
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
        kgem_close_list(kgem, &kgem->inactive[i]);
}

static void kgem_finish_buffers(struct kgem *kgem)
{
    struct kgem_buffer *bo, *next;

    list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
        DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
             __FUNCTION__, bo->base.handle, bo->used, bo->base.exec != NULL,
             bo->write, bo->mmapped));

        assert(next->base.list.prev == &bo->base.list);
        assert(bo->base.io);
        assert(bo->base.refcnt >= 1);

        if (!bo->base.exec) {
            DBG(("%s: skipping unattached handle=%d, used=%d\n",
                 __FUNCTION__, bo->base.handle, bo->used));
            continue;
        }

        if (!bo->write) {
            assert(bo->base.exec || bo->base.refcnt > 1);
            goto decouple;
        }

        if (bo->mmapped) {
            int used;

            assert(!bo->need_io);

            used = ALIGN(bo->used, PAGE_SIZE);
            if (!DBG_NO_UPLOAD_ACTIVE &&
                used + PAGE_SIZE <= bytes(&bo->base) &&
                (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) {
                DBG(("%s: retaining upload buffer (%d/%d)\n",
                     __FUNCTION__, bo->used, bytes(&bo->base)));
                bo->used = used;
                list_move(&bo->base.list,
                          &kgem->active_buffers);
                continue;
            }
            DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
                 __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map)));
            goto decouple;
        }

        if (!bo->used) {
            /* Unless we replace the handle in the execbuffer,
             * then this bo will become active. So decouple it
             * from the buffer list and track it in the normal
             * manner.
             */
            goto decouple;
        }

        assert(bo->need_io);
        assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
        assert(bo->base.domain != DOMAIN_GPU);

        if (bo->base.refcnt == 1 &&
            bo->base.size.pages.count > 1 &&
            bo->used < bytes(&bo->base) / 2) {
            struct kgem_bo *shrink;
            unsigned alloc = NUM_PAGES(bo->used);

            shrink = search_snoop_cache(kgem, alloc,
                                        CREATE_INACTIVE | CREATE_NO_RETIRE);
            if (shrink) {
                void *map;
                int n;

                DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
                     __FUNCTION__,
                     bo->used, bytes(&bo->base), bytes(shrink),
                     bo->base.handle, shrink->handle));

                assert(bo->used <= bytes(shrink));
                map = kgem_bo_map__cpu(kgem, shrink);
                if (map) {
                    kgem_bo_sync__cpu(kgem, shrink);
                    memcpy(map, bo->mem, bo->used);

                    shrink->target_handle =
                        kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
                    for (n = 0; n < kgem->nreloc; n++) {
                        if (kgem->reloc[n].target_handle == bo->base.target_handle) {
                            kgem->reloc[n].target_handle = shrink->target_handle;
                            kgem->reloc[n].presumed_offset = shrink->presumed_offset;
                            kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
                                kgem->reloc[n].delta + shrink->presumed_offset;
                        }
                    }

                    bo->base.exec->handle = shrink->handle;
                    bo->base.exec->offset = shrink->presumed_offset;
                    shrink->exec = bo->base.exec;
                    shrink->rq = bo->base.rq;
                    list_replace(&bo->base.request,
                                 &shrink->request);
                    list_init(&bo->base.request);
                    shrink->needs_flush = bo->base.dirty;

                    bo->base.exec = NULL;
                    bo->base.rq = NULL;
                    bo->base.dirty = false;
                    bo->base.needs_flush = false;
                    bo->used = 0;

                    goto decouple;
                }

                __kgem_bo_destroy(kgem, shrink);
            }

            shrink = search_linear_cache(kgem, alloc,
                                         CREATE_INACTIVE | CREATE_NO_RETIRE);
            if (shrink) {
                int n;

                DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
                     __FUNCTION__,
                     bo->used, bytes(&bo->base), bytes(shrink),
                     bo->base.handle, shrink->handle));

                assert(bo->used <= bytes(shrink));
                if (gem_write(kgem->fd, shrink->handle,
                              0, bo->used, bo->mem) == 0) {
                    shrink->target_handle =
                        kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
                    for (n = 0; n < kgem->nreloc; n++) {
                        if (kgem->reloc[n].target_handle == bo->base.target_handle) {
                            kgem->reloc[n].target_handle = shrink->target_handle;
                            kgem->reloc[n].presumed_offset = shrink->presumed_offset;
                            kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
                                kgem->reloc[n].delta + shrink->presumed_offset;
                        }
                    }

                    bo->base.exec->handle = shrink->handle;
                    bo->base.exec->offset = shrink->presumed_offset;
                    shrink->exec = bo->base.exec;
                    shrink->rq = bo->base.rq;
                    list_replace(&bo->base.request,
                                 &shrink->request);
                    list_init(&bo->base.request);
                    shrink->needs_flush = bo->base.dirty;

                    bo->base.exec = NULL;
                    bo->base.rq = NULL;
                    bo->base.dirty = false;
                    bo->base.needs_flush = false;
                    bo->used = 0;

                    goto decouple;
                }

                __kgem_bo_destroy(kgem, shrink);
            }
        }

        DBG(("%s: handle=%d, uploading %d/%d\n",
             __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
        ASSERT_IDLE(kgem, bo->base.handle);
        assert(bo->used <= bytes(&bo->base));
        gem_write(kgem->fd, bo->base.handle,
                  0, bo->used, bo->mem);
        bo->need_io = 0;

decouple:
        DBG(("%s: releasing handle=%d\n",
             __FUNCTION__, bo->base.handle));
        list_del(&bo->base.list);
        kgem_bo_unref(kgem, &bo->base);
    }
}

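/*
 * When a partially used upload buffer is shrunk above, every relocation
 * that pointed at the old bo is retargeted and the batch dword that held
 * the presumed address is rewritten in place:
 *
 *   batch[reloc.offset / 4] = reloc.delta + shrink->presumed_offset;
 *
 * so no relocation entry is lost, only its target handle and offset
 * change.
 */
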
static void kgem_cleanup(struct kgem *kgem)
{
    int n;

    for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
        while (!list_is_empty(&kgem->requests[n])) {
            struct kgem_request *rq;

            rq = list_first_entry(&kgem->requests[n],
                                  struct kgem_request,
                                  list);
            while (!list_is_empty(&rq->buffers)) {
                struct kgem_bo *bo;

                bo = list_first_entry(&rq->buffers,
                                      struct kgem_bo,
                                      request);

                bo->exec = NULL;
                bo->dirty = false;
                __kgem_bo_clear_busy(bo);
                if (bo->refcnt == 0)
                    kgem_bo_free(kgem, bo);
            }

            __kgem_request_free(rq);
        }
    }

    kgem_close_inactive(kgem);
}

static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
{
    int ret;

    ASSERT_IDLE(kgem, handle);

    /* If there is no surface data, just upload the batch */
    if (kgem->surface == kgem->batch_size)
        return gem_write(kgem->fd, handle,
                         0, sizeof(uint32_t)*kgem->nbatch,
                         kgem->batch);

    /* Are the batch pages conjoint with the surface pages? */
    if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
        assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
        return gem_write(kgem->fd, handle,
                         0, kgem->batch_size*sizeof(uint32_t),
                         kgem->batch);
    }

    /* Disjoint surface/batch, upload separately */
    ret = gem_write(kgem->fd, handle,
                    0, sizeof(uint32_t)*kgem->nbatch,
                    kgem->batch);
    if (ret)
        return ret;

    ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
    ret -= sizeof(uint32_t) * kgem->surface;
    assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
    return __gem_write(kgem->fd, handle,
                       size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
                       kgem->batch + kgem->surface);
}

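/*
 * In the disjoint case above, the surface state ((batch_size - surface)
 * dwords) is written at offset size - (PAGE_ALIGN(batch_size*4) -
 * surface*4), i.e. page-rounding aside, it ends flush against the end of
 * the target bo. This is why compact_batch_surface() further below biases
 * the surface-state relocation deltas when it shrinks the bo.
 */
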
void kgem_reset(struct kgem *kgem)
{
    if (kgem->next_request) {
        struct kgem_request *rq = kgem->next_request;

        while (!list_is_empty(&rq->buffers)) {
            struct kgem_bo *bo =
                list_first_entry(&rq->buffers,
                                 struct kgem_bo,
                                 request);
            list_del(&bo->request);

            assert(RQ(bo->rq) == rq);

            bo->binding.offset = 0;
            bo->exec = NULL;
            bo->target_handle = -1;
            bo->dirty = false;

            if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
                list_add(&bo->request, &kgem->flushing);
                bo->rq = (void *)kgem;
            } else
                __kgem_bo_clear_busy(bo);

            if (!bo->refcnt && !bo->reusable) {
                assert(!bo->snoop);
                DBG(("%s: discarding handle=%d\n",
                     __FUNCTION__, bo->handle));
                kgem_bo_free(kgem, bo);
            }
        }

        if (rq != &kgem->static_request) {
            list_init(&rq->list);
            __kgem_request_free(rq);
        }
    }

    kgem->nfence = 0;
    kgem->nexec = 0;
    kgem->nreloc = 0;
    kgem->nreloc__self = 0;
    kgem->aperture = 0;
    kgem->aperture_fenced = 0;
    kgem->nbatch = 0;
    kgem->surface = kgem->batch_size;
    kgem->mode = KGEM_NONE;
    kgem->flush = 0;
    kgem->batch_flags = kgem->batch_flags_base;

    kgem->next_request = __kgem_request_alloc(kgem);

    kgem_sna_reset(kgem);
}

static int compact_batch_surface(struct kgem *kgem)
{
    int size, shrink, n;

    if (!kgem->has_relaxed_delta)
        return kgem->batch_size * sizeof(uint32_t);

    /* See if we can pack the contents into one or two pages */
    n = ALIGN(kgem->batch_size, 1024);
    size = n - kgem->surface + kgem->nbatch;
    size = ALIGN(size, 1024);

    shrink = n - size;
    if (shrink) {
        DBG(("shrinking from %d to %d\n", kgem->batch_size, size));

        shrink *= sizeof(uint32_t);
        for (n = 0; n < kgem->nreloc; n++) {
            if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
                kgem->reloc[n].target_handle == ~0U)
                kgem->reloc[n].delta -= shrink;

            if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
                kgem->reloc[n].offset -= shrink;
        }
    }

    return size * sizeof(uint32_t);
}

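/*
 * Worked example for compact_batch_surface(), assuming batch_size = 4096
 * dwords (16KiB) with nbatch = 600 and surface = 3800: n = 4096,
 * size = ALIGN(4096 - 3800 + 600, 1024) = 1024 dwords, so the batch is
 * shrunk by 3072 dwords and only 4KiB (one page) is uploaded instead of
 * 16KiB.
 */
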
static struct kgem_bo *
kgem_create_batch(struct kgem *kgem, int size)
{
    struct drm_i915_gem_set_domain set_domain;
    struct kgem_bo *bo;

    if (size <= 4096) {
        bo = list_first_entry(&kgem->pinned_batches[0],
                              struct kgem_bo,
                              list);
        if (!bo->rq) {
out_4096:
            list_move_tail(&bo->list, &kgem->pinned_batches[0]);
            return kgem_bo_reference(bo);
        }

        if (!__kgem_busy(kgem, bo->handle)) {
            assert(RQ(bo->rq)->bo == bo);
            __kgem_retire_rq(kgem, RQ(bo->rq));
            goto out_4096;
        }
    }

    if (size <= 16384) {
        bo = list_first_entry(&kgem->pinned_batches[1],
                              struct kgem_bo,
                              list);
        if (!bo->rq) {
out_16384:
            list_move_tail(&bo->list, &kgem->pinned_batches[1]);
            return kgem_bo_reference(bo);
        }

        if (!__kgem_busy(kgem, bo->handle)) {
            assert(RQ(bo->rq)->bo == bo);
            __kgem_retire_rq(kgem, RQ(bo->rq));
            goto out_16384;
        }
    }

    if (kgem->gen == 020 && !kgem->has_pinned_batches) {
        assert(size <= 16384);

        bo = list_first_entry(&kgem->pinned_batches[size > 4096],
                              struct kgem_bo,
                              list);
        list_move_tail(&bo->list, &kgem->pinned_batches[size > 4096]);

        DBG(("%s: syncing due to busy batches\n", __FUNCTION__));

        VG_CLEAR(set_domain);
        set_domain.handle = bo->handle;
        set_domain.read_domains = I915_GEM_DOMAIN_GTT;
        set_domain.write_domain = I915_GEM_DOMAIN_GTT;
        if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
            DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
            kgem_throttle(kgem);
            return NULL;
        }

        kgem_retire(kgem);
        assert(bo->rq == NULL);
        return kgem_bo_reference(bo);
    }

    return kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
}

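/*
 * Batch buffers come from two small pools of pinned bo (<=4KiB and
 * <=16KiB); a pool entry is recycled as soon as its previous request has
 * retired. Gen2 without pinned-batch support instead blocks on the
 * oldest entry with a set-domain call before reusing it, and anything
 * else falls through to a plain linear allocation.
 */
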
void _kgem_submit(struct kgem *kgem)
{
    struct kgem_request *rq;
    uint32_t batch_end;
    int size;

    assert(!DBG_NO_HW);
    assert(!kgem->wedged);

    assert(kgem->nbatch);
    assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
    assert(kgem->nbatch <= kgem->surface);

    batch_end = kgem_end_batch(kgem);
    kgem_sna_flush(kgem);

    DBG(("batch[%d/%d]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
         kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
         kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));

    assert(kgem->nbatch <= kgem->batch_size);
    assert(kgem->nbatch <= kgem->surface);
    assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
    assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
    assert(kgem->nfence <= kgem->fence_max);

    kgem_finish_buffers(kgem);

#if SHOW_BATCH
    __kgem_batch_debug(kgem, batch_end);
#endif

    rq = kgem->next_request;
    if (kgem->surface != kgem->batch_size)
        size = compact_batch_surface(kgem);
    else
        size = kgem->nbatch * sizeof(kgem->batch[0]);
    rq->bo = kgem_create_batch(kgem, size);
    if (rq->bo) {
        uint32_t handle = rq->bo->handle;
        int i;

        assert(!rq->bo->needs_flush);

        i = kgem->nexec++;
        kgem->exec[i].handle = handle;
        kgem->exec[i].relocation_count = kgem->nreloc;
        kgem->exec[i].relocs_ptr = (uintptr_t)kgem->reloc;
        kgem->exec[i].alignment = 0;
        kgem->exec[i].offset = rq->bo->presumed_offset;
        kgem->exec[i].flags = 0;
        kgem->exec[i].rsvd1 = 0;
        kgem->exec[i].rsvd2 = 0;

        rq->bo->target_handle = kgem->has_handle_lut ? i : handle;
        rq->bo->exec = &kgem->exec[i];
        rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
        list_add(&rq->bo->request, &rq->buffers);
        rq->ring = kgem->ring == KGEM_BLT;

        kgem_fixup_self_relocs(kgem, rq->bo);

        if (kgem_batch_write(kgem, handle, size) == 0) {
            struct drm_i915_gem_execbuffer2 execbuf;
            int ret, retry = 3;

            VG_CLEAR(execbuf);
            execbuf.buffers_ptr = (uintptr_t)kgem->exec;
            execbuf.buffer_count = kgem->nexec;
            execbuf.batch_start_offset = 0;
            execbuf.batch_len = batch_end*sizeof(uint32_t);
            execbuf.cliprects_ptr = 0;
            execbuf.num_cliprects = 0;
            execbuf.DR1 = 0;
            execbuf.DR4 = 0;
            execbuf.flags = kgem->ring | kgem->batch_flags;
            execbuf.rsvd1 = 0;
            execbuf.rsvd2 = 0;

            ret = drmIoctl(kgem->fd,
                           DRM_IOCTL_I915_GEM_EXECBUFFER2,
                           &execbuf);
            while (ret == -1 && errno == EBUSY && retry--) {
                __kgem_throttle(kgem);
                ret = drmIoctl(kgem->fd,
                               DRM_IOCTL_I915_GEM_EXECBUFFER2,
                               &execbuf);
            }
            if (DEBUG_SYNC && ret == 0) {
                struct drm_i915_gem_set_domain set_domain;

                VG_CLEAR(set_domain);
                set_domain.handle = handle;
                set_domain.read_domains = I915_GEM_DOMAIN_GTT;
                set_domain.write_domain = I915_GEM_DOMAIN_GTT;

                ret = drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
            }
            if (ret == -1) {
//              DBG(("%s: GPU hang detected [%d]\n",
//                   __FUNCTION__, errno));
                kgem_throttle(kgem);
                kgem->wedged = true;

#if 0
                ret = errno;
                ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
                       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
                       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);

                for (i = 0; i < kgem->nexec; i++) {
                    struct kgem_bo *bo, *found = NULL;

                    list_for_each_entry(bo, &kgem->next_request->buffers, request) {
                        if (bo->handle == kgem->exec[i].handle) {
                            found = bo;
                            break;
                        }
                    }
                    ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
                           i,
                           kgem->exec[i].handle,
                           (int)kgem->exec[i].offset,
                           found ? kgem_bo_size(found) : -1,
                           found ? found->tiling : -1,
                           (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
                           found ? found->snoop : -1,
                           found ? found->purged : -1);
                }
                for (i = 0; i < kgem->nreloc; i++) {
                    ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n",
                           i,
                           (int)kgem->reloc[i].offset,
                           kgem->reloc[i].target_handle,
                           kgem->reloc[i].delta,
                           kgem->reloc[i].read_domains,
                           kgem->reloc[i].write_domain,
                           (int)kgem->reloc[i].presumed_offset);
                }

                if (DEBUG_SYNC) {
                    int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
                    if (fd != -1) {
                        write(fd, kgem->batch, batch_end*sizeof(uint32_t));
                        close(fd);
                    }

                    FatalError("SNA: failed to submit batchbuffer, errno=%d\n", ret);
                }
#endif
            }
        }

        kgem_commit(kgem);
    }
    if (kgem->wedged)
        kgem_cleanup(kgem);

    kgem_reset(kgem);

    assert(kgem->next_request != NULL);
}

void kgem_throttle(struct kgem *kgem)
{
    kgem->need_throttle = 0;
    if (kgem->wedged)
        return;

    kgem->wedged = __kgem_throttle(kgem);
    if (kgem->wedged) {
        printf("Detected a hung GPU, disabling acceleration.\n");
        printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
    }
}

void kgem_purge_cache(struct kgem *kgem)
{
    struct kgem_bo *bo, *next;
    int i;

    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
        list_for_each_entry_safe(bo, next, &kgem->inactive[i], list) {
            if (!kgem_bo_is_retained(kgem, bo)) {
                DBG(("%s: purging %d\n",
                     __FUNCTION__, bo->handle));
                kgem_bo_free(kgem, bo);
            }
        }
    }

    kgem->need_purge = false;
}

bool kgem_expire_cache(struct kgem *kgem)
{
    time_t now, expire;
    struct kgem_bo *bo;
    unsigned int size = 0, count = 0;
    bool idle;
    unsigned int i;

    time(&now);

    while (__kgem_freed_bo) {
        bo = __kgem_freed_bo;
        __kgem_freed_bo = *(struct kgem_bo **)bo;
        free(bo);
    }

    while (__kgem_freed_request) {
        struct kgem_request *rq = __kgem_freed_request;
        __kgem_freed_request = *(struct kgem_request **)rq;
        free(rq);
    }

    while (!list_is_empty(&kgem->large_inactive)) {
        kgem_bo_free(kgem,
                     list_first_entry(&kgem->large_inactive,
                                      struct kgem_bo, list));
    }

    while (!list_is_empty(&kgem->scanout)) {
        bo = list_first_entry(&kgem->scanout, struct kgem_bo, list);
        if (__kgem_busy(kgem, bo->handle))
            break;

        list_del(&bo->list);
        kgem_bo_clear_scanout(kgem, bo);
        __kgem_bo_destroy(kgem, bo);
    }

    expire = 0;
    list_for_each_entry(bo, &kgem->snoop, list) {
        if (bo->delta) {
            expire = now - MAX_INACTIVE_TIME/2;
            break;
        }

        bo->delta = now;
    }
    if (expire) {
        while (!list_is_empty(&kgem->snoop)) {
            bo = list_last_entry(&kgem->snoop, struct kgem_bo, list);

            if (bo->delta > expire)
                break;

            kgem_bo_free(kgem, bo);
        }
    }
#ifdef DEBUG_MEMORY
    {
        long snoop_size = 0;
        int snoop_count = 0;
        list_for_each_entry(bo, &kgem->snoop, list)
            snoop_count++, snoop_size += bytes(bo);
        ErrorF("%s: still allocated %d bo, %ld bytes, in snoop cache\n",
               __FUNCTION__, snoop_count, snoop_size);
    }
#endif

    kgem_retire(kgem);
    if (kgem->wedged)
        kgem_cleanup(kgem);

    kgem->expire(kgem);

    if (kgem->need_purge)
        kgem_purge_cache(kgem);

    expire = 0;

    idle = !kgem->need_retire;
    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
        idle &= list_is_empty(&kgem->inactive[i]);
        list_for_each_entry(bo, &kgem->inactive[i], list) {
            if (bo->delta) {
                expire = now - MAX_INACTIVE_TIME;
                break;
            }

            bo->delta = now;
        }
    }
    if (idle) {
        DBG(("%s: idle\n", __FUNCTION__));
        kgem->need_expire = false;
        return false;
    }
    if (expire == 0)
        return true;

    idle = !kgem->need_retire;
    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
        struct list preserve;

        list_init(&preserve);
        while (!list_is_empty(&kgem->inactive[i])) {
            bo = list_last_entry(&kgem->inactive[i],
                                 struct kgem_bo, list);

            if (bo->delta > expire) {
                idle = false;
                break;
            }

            if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) {
                idle = false;
                list_move_tail(&bo->list, &preserve);
            } else {
                count++;
                size += bytes(bo);
                kgem_bo_free(kgem, bo);
                DBG(("%s: expiring %d\n",
                     __FUNCTION__, bo->handle));
            }
        }
        if (!list_is_empty(&preserve)) {
            preserve.prev->next = kgem->inactive[i].next;
            kgem->inactive[i].next->prev = preserve.prev;
            kgem->inactive[i].next = preserve.next;
            preserve.next->prev = &kgem->inactive[i];
        }
    }

#ifdef DEBUG_MEMORY
    {
        long inactive_size = 0;
        int inactive_count = 0;
        for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
            list_for_each_entry(bo, &kgem->inactive[i], list)
                inactive_count++, inactive_size += bytes(bo);
        ErrorF("%s: still allocated %d bo, %ld bytes, in inactive cache\n",
               __FUNCTION__, inactive_count, inactive_size);
    }
#endif

    DBG(("%s: expired %d objects, %d bytes, idle? %d\n",
         __FUNCTION__, count, size, idle));

    kgem->need_expire = !idle;
    return !idle;
    (void)count;
    (void)size;
}

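/*
 * Expiry bookkeeping: bo->delta doubles as a timestamp while a bo sits
 * in a cache. The first pass above stamps fresh entries with the current
 * time; only once stamped entries exist does a later call free those
 * older than MAX_INACTIVE_TIME (half that for snooped bo), while mmapped
 * bo are kept for an extra MAP_PRESERVE_TIME via the preserve list,
 * which is spliced back onto the head of the bucket.
 */
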
void kgem_cleanup_cache(struct kgem *kgem)
{
    unsigned int i;
    int n;

    /* sync to the most recent request */
    for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
        if (!list_is_empty(&kgem->requests[n])) {
            struct kgem_request *rq;
            struct drm_i915_gem_set_domain set_domain;

            rq = list_first_entry(&kgem->requests[n],
                                  struct kgem_request,
                                  list);

            DBG(("%s: sync on cleanup\n", __FUNCTION__));

            VG_CLEAR(set_domain);
            set_domain.handle = rq->bo->handle;
            set_domain.read_domains = I915_GEM_DOMAIN_GTT;
            set_domain.write_domain = I915_GEM_DOMAIN_GTT;
            (void)drmIoctl(kgem->fd,
                           DRM_IOCTL_I915_GEM_SET_DOMAIN,
                           &set_domain);
        }
    }

    kgem_retire(kgem);
    kgem_cleanup(kgem);

    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
        while (!list_is_empty(&kgem->inactive[i]))
            kgem_bo_free(kgem,
                         list_last_entry(&kgem->inactive[i],
                                         struct kgem_bo, list));
    }

    while (!list_is_empty(&kgem->snoop))
        kgem_bo_free(kgem,
                     list_last_entry(&kgem->snoop,
                                     struct kgem_bo, list));

    while (__kgem_freed_bo) {
        struct kgem_bo *bo = __kgem_freed_bo;
        __kgem_freed_bo = *(struct kgem_bo **)bo;
        free(bo);
    }

    kgem->need_purge = false;
    kgem->need_expire = false;
}

static struct kgem_bo *
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
{
    struct kgem_bo *bo, *first = NULL;
    bool use_active = (flags & CREATE_INACTIVE) == 0;
    struct list *cache;

    DBG(("%s: num_pages=%d, flags=%x, use_active? %d\n",
         __FUNCTION__, num_pages, flags, use_active));

    if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
        return NULL;

    if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
        DBG(("%s: inactive and cache bucket empty\n",
             __FUNCTION__));

        if (flags & CREATE_NO_RETIRE) {
            DBG(("%s: can not retire\n", __FUNCTION__));
            return NULL;
        }

        if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) {
            DBG(("%s: active cache bucket empty\n", __FUNCTION__));
            return NULL;
        }

        if (!__kgem_throttle_retire(kgem, flags)) {
            DBG(("%s: nothing retired\n", __FUNCTION__));
            return NULL;
        }

        if (list_is_empty(inactive(kgem, num_pages))) {
            DBG(("%s: inactive cache bucket still empty after retire\n",
                 __FUNCTION__));
            return NULL;
        }
    }

    if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
        int for_cpu = !!(flags & CREATE_CPU_MAP);
        DBG(("%s: searching for inactive %s map\n",
             __FUNCTION__, for_cpu ? "cpu" : "gtt"));
        cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
        list_for_each_entry(bo, cache, vma) {
            assert(IS_CPU_MAP(bo->map) == for_cpu);
            assert(bucket(bo) == cache_bucket(num_pages));
            assert(bo->proxy == NULL);
            assert(bo->rq == NULL);
            assert(bo->exec == NULL);
            assert(!bo->scanout);

            if (num_pages > num_pages(bo)) {
                DBG(("inactive too small: %d < %d\n",
                     num_pages(bo), num_pages));
                continue;
            }

            if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
                kgem_bo_free(kgem, bo);
                break;
            }

            if (I915_TILING_NONE != bo->tiling &&
                !gem_set_tiling(kgem->fd, bo->handle,
                                I915_TILING_NONE, 0))
                continue;

            kgem_bo_remove_from_inactive(kgem, bo);

            bo->tiling = I915_TILING_NONE;
            bo->pitch = 0;
            bo->delta = 0;
            DBG((" %s: found handle=%d (num_pages=%d) in linear vma cache\n",
                 __FUNCTION__, bo->handle, num_pages(bo)));
            assert(use_active || bo->domain != DOMAIN_GPU);
            assert(!bo->needs_flush);
            ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
            return bo;
        }

        if (flags & CREATE_EXACT)
            return NULL;

        if (flags & CREATE_CPU_MAP && !kgem->has_llc)
            return NULL;
    }

    cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages);
    list_for_each_entry(bo, cache, list) {
        assert(bo->refcnt == 0);
        assert(bo->reusable);
        assert(!!bo->rq == !!use_active);
        assert(bo->proxy == NULL);
        assert(!bo->scanout);

        if (num_pages > num_pages(bo))
            continue;

        if (use_active &&
            kgem->gen <= 040 &&
            bo->tiling != I915_TILING_NONE)
            continue;

        if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
            kgem_bo_free(kgem, bo);
            break;
        }

        if (I915_TILING_NONE != bo->tiling) {
            if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP))
                continue;

            if (first)
                continue;

            if (!gem_set_tiling(kgem->fd, bo->handle,
                                I915_TILING_NONE, 0))
                continue;

            bo->tiling = I915_TILING_NONE;
            bo->pitch = 0;
        }

        if (bo->map) {
            if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
                int for_cpu = !!(flags & CREATE_CPU_MAP);
                if (IS_CPU_MAP(bo->map) != for_cpu) {
                    if (first != NULL)
                        break;

                    first = bo;
                    continue;
                }
            } else {
                if (first != NULL)
                    break;

                first = bo;
                continue;
            }
        } else {
            if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
                if (first != NULL)
                    break;

                first = bo;
                continue;
            }
        }

        if (use_active)
            kgem_bo_remove_from_active(kgem, bo);
        else
            kgem_bo_remove_from_inactive(kgem, bo);

        assert(bo->tiling == I915_TILING_NONE);
        bo->pitch = 0;
        bo->delta = 0;
        DBG((" %s: found handle=%d (num_pages=%d) in linear %s cache\n",
             __FUNCTION__, bo->handle, num_pages(bo),
             use_active ? "active" : "inactive"));
        assert(list_is_empty(&bo->list));
        assert(use_active || bo->domain != DOMAIN_GPU);
        assert(!bo->needs_flush || use_active);
        ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
        return bo;
    }

    if (first) {
        assert(first->tiling == I915_TILING_NONE);

        if (use_active)
            kgem_bo_remove_from_active(kgem, first);
        else
            kgem_bo_remove_from_inactive(kgem, first);

        first->pitch = 0;
        first->delta = 0;
        DBG((" %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
             __FUNCTION__, first->handle, num_pages(first),
             use_active ? "active" : "inactive"));
        assert(list_is_empty(&first->list));
        assert(use_active || first->domain != DOMAIN_GPU);
        assert(!first->needs_flush || use_active);
        ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active);
        return first;
    }

    return NULL;
}

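/*
 * search_linear_cache() mirrors the snoop-cache search: an exact-enough
 * bo is taken immediately, while a bo with the wrong map type or tiling
 * is only remembered as a near-miss in 'first' and returned if nothing
 * better turns up. CREATE_INACTIVE restricts the search to idle bo;
 * otherwise only the active buckets are searched.
 */
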
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
{
    struct kgem_bo *bo;
    uint32_t handle;

    DBG(("%s(%d)\n", __FUNCTION__, size));

    if (flags & CREATE_GTT_MAP && kgem->has_llc) {
        flags &= ~CREATE_GTT_MAP;
        flags |= CREATE_CPU_MAP;
    }

    size = (size + PAGE_SIZE - 1) / PAGE_SIZE;
    bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags);
    if (bo) {
        assert(bo->domain != DOMAIN_GPU);
        ASSERT_IDLE(kgem, bo->handle);
        bo->refcnt = 1;
        return bo;
    }

    if (flags & CREATE_CACHED)
        return NULL;

    handle = gem_create(kgem->fd, size);
    if (handle == 0)
        return NULL;

    DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size));
    bo = __kgem_bo_alloc(handle, size);
    if (bo == NULL) {
        gem_close(kgem->fd, handle);
        return NULL;
    }

    debug_alloc__bo(kgem, bo);
    return bo;
}

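/*
 * A hedged usage sketch for kgem_create_linear(): sizes are given in
 * bytes and rounded up to whole pages, the inactive cache is searched
 * first, and CREATE_CACHED callers give up rather than allocate fresh
 * storage.
 */
#if 0
static struct kgem_bo *example_scratch_64k(struct kgem *kgem)
{
    /* 64KiB rounds up to 16 pages; the returned bo holds one reference */
    return kgem_create_linear(kgem, 64 * 1024, CREATE_NO_THROTTLE);
}
#endif
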
inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
{
    unsigned int size;

    assert(bo->tiling);
    assert(kgem->gen < 040);

    if (kgem->gen < 030)
        size = 512 * 1024;
    else
        size = 1024 * 1024;
    while (size < bytes(bo))
        size *= 2;

    return size;
}

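/*
 * Worked example for kgem_bo_fenced_size(): pre-gen4 fence regions must
 * be power-of-two sized, at least 512KiB on gen2 and 1MiB on gen3, so a
 * 1.5MiB tiled bo on gen3 occupies a 2MiB fence region.
 */
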
3258 | Serge | 3067 | struct kgem_bo *kgem_create_2d(struct kgem *kgem, |
3068 | int width, |
||
3069 | int height, |
||
3070 | int bpp, |
||
3071 | int tiling, |
||
3072 | uint32_t flags) |
||
3073 | { |
||
3074 | struct list *cache; |
||
3075 | struct kgem_bo *bo; |
||
3076 | uint32_t pitch, untiled_pitch, tiled_height, size; |
||
3077 | uint32_t handle; |
||
3078 | int i, bucket, retry; |
||
3079 | |||
3080 | if (tiling < 0) |
||
3081 | tiling = -tiling, flags |= CREATE_EXACT; |
||
3082 | |||
3083 | DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__, |
||
3084 | width, height, bpp, tiling, |
||
3085 | !!(flags & CREATE_EXACT), |
||
3086 | !!(flags & CREATE_INACTIVE), |
||
3087 | !!(flags & CREATE_CPU_MAP), |
||
3088 | !!(flags & CREATE_GTT_MAP), |
||
3089 | !!(flags & CREATE_SCANOUT), |
||
3090 | !!(flags & CREATE_PRIME), |
||
3091 | !!(flags & CREATE_TEMPORARY))); |
||
3092 | |||
3093 | size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags, |
||
3094 | width, height, bpp, tiling, &pitch); |
||
3095 | assert(size && size <= kgem->max_object_size); |
||
3096 | size /= PAGE_SIZE; |
||
3097 | bucket = cache_bucket(size); |
||
3098 | |||
3099 | if (flags & CREATE_SCANOUT) { |
||
3100 | assert((flags & CREATE_INACTIVE) == 0); |
||
3101 | list_for_each_entry_reverse(bo, &kgem->scanout, list) { |
||
3102 | assert(bo->scanout); |
||
3103 | assert(bo->delta); |
||
3104 | assert(!bo->purged); |
||
3105 | |||
3106 | if (size > num_pages(bo) || num_pages(bo) > 2*size) |
||
3107 | continue; |
||
3108 | |||
3109 | if (bo->tiling != tiling || |
||
3110 | (tiling != I915_TILING_NONE && bo->pitch != pitch)) { |
||
3111 | if (!gem_set_tiling(kgem->fd, bo->handle, |
||
3112 | tiling, pitch)) |
||
3113 | continue; |
||
3114 | |||
3115 | bo->tiling = tiling; |
||
3116 | bo->pitch = pitch; |
||
3117 | } |
||
3118 | |||
3119 | list_del(&bo->list); |
||
3120 | |||
3121 | bo->unique_id = kgem_get_unique_id(kgem); |
||
3122 | DBG((" 1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n", |
||
3123 | bo->pitch, bo->tiling, bo->handle, bo->unique_id)); |
||
3124 | assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo)); |
||
3125 | bo->refcnt = 1; |
||
3126 | return bo; |
||
3127 | } |
||
3128 | } |
||
3129 | |||
3130 | if (bucket >= NUM_CACHE_BUCKETS) { |
||
3131 | DBG(("%s: large bo num pages=%d, bucket=%d\n", |
||
3132 | __FUNCTION__, size, bucket)); |
||
3133 | |||
3134 | if (flags & CREATE_INACTIVE) |
||
3135 | goto large_inactive; |
||
3136 | |||
3137 | tiled_height = kgem_aligned_height(kgem, height, tiling); |
||
3138 | untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags); |
||
3139 | |||
3140 | list_for_each_entry(bo, &kgem->large, list) { |
||
3141 | assert(!bo->purged); |
||
3142 | assert(!bo->scanout); |
||
3143 | assert(bo->refcnt == 0); |
||
3144 | assert(bo->reusable); |
||
3145 | assert(bo->flush == true); |
||
3146 | |||
3147 | if (kgem->gen < 040) { |
||
3148 | if (bo->pitch < pitch) { |
||
3149 | DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n", |
||
3150 | bo->tiling, tiling, |
||
3151 | bo->pitch, pitch)); |
||
3152 | continue; |
||
3153 | } |
||
3154 | |||
3155 | if (bo->pitch * tiled_height > bytes(bo)) |
||
3156 | continue; |
||
3157 | } else { |
||
3158 | if (num_pages(bo) < size) |
||
3159 | continue; |
||
3160 | |||
3161 | if (bo->pitch != pitch || bo->tiling != tiling) { |
||
3162 | if (!gem_set_tiling(kgem->fd, bo->handle, |
||
3163 | tiling, pitch)) |
||
3164 | continue; |
||
3165 | |||
3166 | bo->pitch = pitch; |
||
3167 | bo->tiling = tiling; |
||
3168 | } |
||
3169 | } |
||
3170 | |||
3171 | kgem_bo_remove_from_active(kgem, bo); |
||
3172 | |||
3173 | bo->unique_id = kgem_get_unique_id(kgem); |
||
3174 | bo->delta = 0; |
||
3175 | DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n", |
||
3176 | bo->pitch, bo->tiling, bo->handle, bo->unique_id)); |
||
3177 | assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo)); |
||
3178 | bo->refcnt = 1; |
||
3179 | return bo; |
||
3180 | } |
||
3181 | |||
3182 | large_inactive: |
||
3183 | list_for_each_entry(bo, &kgem->large_inactive, list) { |
||
3184 | assert(bo->refcnt == 0); |
||
3185 | assert(bo->reusable); |
||
3186 | assert(!bo->scanout); |
||
3187 | |||
3188 | if (size > num_pages(bo)) |
||
3189 | continue; |
||
3190 | |||
3191 | if (bo->tiling != tiling || |
||
3192 | (tiling != I915_TILING_NONE && bo->pitch != pitch)) { |
||
3193 | if (!gem_set_tiling(kgem->fd, bo->handle, |
||
3194 | tiling, pitch)) |
||
3195 | continue; |
||
3196 | |||
3197 | bo->tiling = tiling; |
||
3198 | bo->pitch = pitch; |
||
3199 | } |
||
3200 | |||
3201 | if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) { |
||
3202 | kgem_bo_free(kgem, bo); |
||
3203 | break; |
||
3204 | } |
||
3205 | |||
3206 | list_del(&bo->list); |
||
3207 | |||
3208 | bo->unique_id = kgem_get_unique_id(kgem); |
||
3209 | bo->pitch = pitch; |
||
3210 | bo->delta = 0; |
||
3211 | DBG((" 1:from large inactive: pitch=%d, tiling=%d, handle=%d, id=%d\n", |
||
3212 | bo->pitch, bo->tiling, bo->handle, bo->unique_id)); |
||
3213 | assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo)); |
||
3214 | bo->refcnt = 1; |
||
3215 | return bo; |
||
3216 | } |
||
3217 | |||
3218 | goto create; |
||
3219 | } |
||
3220 | |||
	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
		int for_cpu = !!(flags & CREATE_CPU_MAP);
		if (kgem->has_llc && tiling == I915_TILING_NONE)
			for_cpu = 1;
		/* We presume that we will need to upload to this bo,
		 * and so would prefer to have an active VMA.
		 */
		cache = &kgem->vma[for_cpu].inactive[bucket];
		do {
			list_for_each_entry(bo, cache, vma) {
				assert(bucket(bo) == bucket);
				assert(bo->refcnt == 0);
				assert(!bo->scanout);
				assert(bo->map);
				assert(IS_CPU_MAP(bo->map) == for_cpu);
				assert(bo->rq == NULL);
				assert(list_is_empty(&bo->request));
				assert(bo->flush == false);

				if (size > num_pages(bo)) {
					DBG(("inactive too small: %d < %d\n",
					     num_pages(bo), size));
					continue;
				}

				if (bo->tiling != tiling ||
				    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
					DBG(("inactive vma with wrong tiling: %d < %d\n",
					     bo->tiling, tiling));
					continue;
				}

				if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
					kgem_bo_free(kgem, bo);
					break;
				}

				bo->pitch = pitch;
				bo->delta = 0;
				bo->unique_id = kgem_get_unique_id(kgem);

				kgem_bo_remove_from_inactive(kgem, bo);

				DBG((" from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n",
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
				assert(bo->reusable);
				assert(bo->domain != DOMAIN_GPU);
				ASSERT_IDLE(kgem, bo->handle);
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
				bo->refcnt = 1;
				return bo;
			}
		} while (!list_is_empty(cache) &&
			 __kgem_throttle_retire(kgem, flags));

		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
			goto create;
	}

	if (flags & CREATE_INACTIVE)
		goto skip_active_search;

	/* Best active match */
	retry = NUM_CACHE_BUCKETS - bucket;
	if (retry > 3 && (flags & CREATE_TEMPORARY) == 0)
		retry = 3;
search_again:
	assert(bucket < NUM_CACHE_BUCKETS);
	cache = &kgem->active[bucket][tiling];
	if (tiling) {
		tiled_height = kgem_aligned_height(kgem, height, tiling);
		list_for_each_entry(bo, cache, list) {
			assert(!bo->purged);
			assert(bo->refcnt == 0);
			assert(bucket(bo) == bucket);
			assert(bo->reusable);
			assert(bo->tiling == tiling);
			assert(bo->flush == false);
			assert(!bo->scanout);

			if (kgem->gen < 040) {
				if (bo->pitch < pitch) {
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
					     bo->tiling, tiling,
					     bo->pitch, pitch));
					continue;
				}

				if (bo->pitch * tiled_height > bytes(bo))
					continue;
			} else {
				if (num_pages(bo) < size)
					continue;

				if (bo->pitch != pitch) {
					if (!gem_set_tiling(kgem->fd,
							    bo->handle,
							    tiling, pitch))
						continue;

					bo->pitch = pitch;
				}
			}

			kgem_bo_remove_from_active(kgem, bo);

			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			bo->refcnt = 1;
			return bo;
		}
	} else {
		list_for_each_entry(bo, cache, list) {
			assert(bucket(bo) == bucket);
			assert(!bo->purged);
			assert(bo->refcnt == 0);
			assert(bo->reusable);
			assert(!bo->scanout);
			assert(bo->tiling == tiling);
			assert(bo->flush == false);

			if (num_pages(bo) < size)
				continue;

			kgem_bo_remove_from_active(kgem, bo);

			bo->pitch = pitch;
			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			bo->refcnt = 1;
			return bo;
		}
	}

	if (--retry && flags & CREATE_EXACT) {
		if (kgem->gen >= 040) {
			for (i = I915_TILING_NONE; i <= I915_TILING_Y; i++) {
				if (i == tiling)
					continue;

				cache = &kgem->active[bucket][i];
				list_for_each_entry(bo, cache, list) {
					assert(!bo->purged);
					assert(bo->refcnt == 0);
					assert(bo->reusable);
					assert(!bo->scanout);
					assert(bo->flush == false);

					if (num_pages(bo) < size)
						continue;

					if (!gem_set_tiling(kgem->fd,
							    bo->handle,
							    tiling, pitch))
						continue;

					kgem_bo_remove_from_active(kgem, bo);

					bo->unique_id = kgem_get_unique_id(kgem);
					bo->pitch = pitch;
					bo->tiling = tiling;
					bo->delta = 0;
					DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
					assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
					bo->refcnt = 1;
					return bo;
				}
			}
		}

		bucket++;
		goto search_again;
	}

	if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
		i = tiling;
		while (--i >= 0) {
			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
							 width, height, bpp, tiling, &pitch);
			cache = active(kgem, tiled_height / PAGE_SIZE, i);
			tiled_height = kgem_aligned_height(kgem, height, i);
			list_for_each_entry(bo, cache, list) {
				assert(!bo->purged);
				assert(bo->refcnt == 0);
				assert(bo->reusable);
				assert(!bo->scanout);
				assert(bo->flush == false);

				if (bo->tiling) {
					if (bo->pitch < pitch) {
						DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
						     bo->tiling, tiling,
						     bo->pitch, pitch));
						continue;
					}
				} else
					bo->pitch = untiled_pitch;

				if (bo->pitch * tiled_height > bytes(bo))
					continue;

				kgem_bo_remove_from_active(kgem, bo);

				bo->unique_id = kgem_get_unique_id(kgem);
				bo->delta = 0;
				DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
				bo->refcnt = 1;
				return bo;
			}
		}
	}

skip_active_search:
	bucket = cache_bucket(size);
	retry = NUM_CACHE_BUCKETS - bucket;
	if (retry > 3)
		retry = 3;
search_inactive:
	/* Now just look for a close match and prefer any currently active */
	assert(bucket < NUM_CACHE_BUCKETS);
	cache = &kgem->inactive[bucket];
	list_for_each_entry(bo, cache, list) {
		assert(bucket(bo) == bucket);
		assert(bo->reusable);
		assert(!bo->scanout);
		assert(bo->flush == false);

		if (size > num_pages(bo)) {
			DBG(("inactive too small: %d < %d\n",
			     num_pages(bo), size));
			continue;
		}

		if (bo->tiling != tiling ||
		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
			if (!gem_set_tiling(kgem->fd, bo->handle,
					    tiling, pitch))
				continue;

			if (bo->map)
				kgem_bo_release_map(kgem, bo);
		}

		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
			kgem_bo_free(kgem, bo);
			break;
		}

		kgem_bo_remove_from_inactive(kgem, bo);

		bo->pitch = pitch;
		bo->tiling = tiling;

		bo->delta = 0;
		bo->unique_id = kgem_get_unique_id(kgem);
		assert(bo->pitch);
		DBG((" from inactive: pitch=%d, tiling=%d: handle=%d, id=%d\n",
		     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
		assert(bo->refcnt == 0);
		assert(bo->reusable);
		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
		ASSERT_MAYBE_IDLE(kgem, bo->handle, flags & CREATE_INACTIVE);
		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
		bo->refcnt = 1;
		return bo;
	}

	if (flags & CREATE_INACTIVE &&
	    !list_is_empty(&kgem->active[bucket][tiling]) &&
	    __kgem_throttle_retire(kgem, flags)) {
		flags &= ~CREATE_INACTIVE;
		goto search_inactive;
	}

	if (--retry) {
		bucket++;
		flags &= ~CREATE_INACTIVE;
		goto search_inactive;
	}

create:
	if (bucket >= NUM_CACHE_BUCKETS)
		size = ALIGN(size, 1024);
	handle = gem_create(kgem->fd, size);
	if (handle == 0)
		return NULL;

	bo = __kgem_bo_alloc(handle, size);
	if (!bo) {
		gem_close(kgem->fd, handle);
		return NULL;
	}

	bo->domain = DOMAIN_CPU;
	bo->unique_id = kgem_get_unique_id(kgem);
	bo->pitch = pitch;
	if (tiling != I915_TILING_NONE &&
	    gem_set_tiling(kgem->fd, handle, tiling, pitch))
		bo->tiling = tiling;
	if (bucket >= NUM_CACHE_BUCKETS) {
		DBG(("%s: marking large bo for automatic flushing\n",
		     __FUNCTION__));
		bo->flush = true;
	}

	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));

	debug_alloc__bo(kgem, bo);

	DBG((" new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
	     size, num_pages(bo), bucket(bo)));
	return bo;
}

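/* Illustrative only: a minimal sketch of how a caller might allocate a
 * tiled surface through the cache-search path above. The surface
 * parameters are hypothetical and not taken from this file.
 */
#if 0
static struct kgem_bo *example_alloc_render_target(struct kgem *kgem)
{
	/* Request an X-tiled 1024x768 32bpp surface; kgem_create_2d()
	 * walks the active and inactive caches before falling back to
	 * gem_create() for a fresh allocation.
	 */
	return kgem_create_2d(kgem, 1024, 768, 32,
			      I915_TILING_X, CREATE_GTT_MAP);
}
#endif
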
#if 0
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags)
{
	struct kgem_bo *bo;
	int stride, size;

	if (DBG_NO_CPU)
		return NULL;

	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));

	if (kgem->has_llc) {
		bo = kgem_create_2d(kgem, width, height, bpp,
				    I915_TILING_NONE, flags);
		if (bo == NULL)
			return bo;

		assert(bo->tiling == I915_TILING_NONE);

		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}

		return bo;
	}

	assert(width > 0 && height > 0);
	stride = ALIGN(width, 2) * bpp >> 3;
	stride = ALIGN(stride, 4);
	size = stride * ALIGN(height, 2);
	assert(size >= PAGE_SIZE);

	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
	     __FUNCTION__, width, height, bpp, stride));

	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
	if (bo) {
		assert(bo->tiling == I915_TILING_NONE);
		assert(bo->snoop);
		bo->refcnt = 1;
		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	if (kgem->has_cacheing) {
		bo = kgem_create_linear(kgem, size, flags);
		if (bo == NULL)
			return NULL;

		assert(bo->tiling == I915_TILING_NONE);

		if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED)) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}
		bo->snoop = true;

		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}

		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	if (kgem->has_userptr) {
		void *ptr;

		/* XXX */
		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
			return NULL;

		bo = kgem_create_map(kgem, ptr, size, false);
		if (bo == NULL) {
			free(ptr);
			return NULL;
		}

		bo->map = MAKE_USER_MAP(ptr);
		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	return NULL;
}
#endif

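/* A proxy shares its target's GEM handle, so destroying one merely
 * unlinks it and drops the reference on the target; only a real bo is
 * handed back to the caches via __kgem_bo_destroy().
 */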
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, proxy? %d\n",
	     __FUNCTION__, bo->handle, bo->proxy != NULL));

	if (bo->proxy) {
		_list_del(&bo->vma);
		_list_del(&bo->request);
		if (bo->io && bo->exec == NULL)
			_kgem_bo_delete_buffer(kgem, bo);
		kgem_bo_unref(kgem, bo->proxy);
		kgem_bo_binding_free(kgem, bo);
		free(bo);
		return;
	}

	__kgem_bo_destroy(kgem, bo);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->rq);
	assert(bo->exec == NULL);
	assert(bo->needs_flush);

	/* The kernel will emit a flush *and* update its own flushing lists. */
	if (!__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	DBG(("%s: handle=%d, busy?=%d\n",
	     __FUNCTION__, bo->handle, bo->rq != NULL));
}

inline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
{
	return kgem->nreloc && bo->rq && RQ_RING(bo->rq) != kgem->ring;
}

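/* Check whether the given bo (a NULL-terminated varargs list) can be
 * added to the current batch without exceeding the aperture watermarks
 * or the exec slots, and without requiring a cross-ring semaphore.
 * Returns false if the caller should submit the batch first.
 */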
bool kgem_check_bo(struct kgem *kgem, ...)
{
	va_list ap;
	struct kgem_bo *bo;
	int num_exec = 0;
	int num_pages = 0;
	bool flush = false;

	va_start(ap, kgem);
	while ((bo = va_arg(ap, struct kgem_bo *))) {
		while (bo->proxy)
			bo = bo->proxy;
		if (bo->exec)
			continue;

		if (needs_semaphore(kgem, bo))
			return false;

		num_pages += num_pages(bo);
		num_exec++;

		flush |= bo->flush;
	}
	va_end(ap);

	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
	     __FUNCTION__, num_pages, num_exec));

	if (!num_pages)
		return true;

	if (kgem_flush(kgem, flush))
		return false;

	if (kgem->aperture > kgem->aperture_low &&
	    kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: current aperture usage (%d) is greater than low water mark (%d)\n",
		     __FUNCTION__, kgem->aperture, kgem->aperture_low));
		return false;
	}

	if (num_pages + kgem->aperture > kgem->aperture_high) {
		DBG(("%s: final aperture usage (%d) is greater than high water mark (%d)\n",
		     __FUNCTION__, num_pages + kgem->aperture, kgem->aperture_high));
		return false;
	}

	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) {
		DBG(("%s: out of exec slots (%d + %d / %d)\n", __FUNCTION__,
		     kgem->nexec, num_exec, KGEM_EXEC_SIZE(kgem)));
		return false;
	}

	return true;
}

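/* Record a relocation at batch position 'pos' against 'bo' and return
 * the presumed offset (plus delta) to write into the batch. A bo with
 * the magic handle -2 is the KolibriOS framebuffer (see kgem_init_fb)
 * and needs no relocation entry of its own.
 */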
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domain,
			uint32_t delta)
{
	int index;

	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));

	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);

	if (bo != NULL && bo->handle == -2) {
		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);

		if (read_write_domain & 0x7fff && !bo->dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}
		return 0;
	}

	index = kgem->nreloc++;
	assert(index < ARRAY_SIZE(kgem->reloc));
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
	if (bo) {
		assert(bo->refcnt);
		assert(!bo->purged);

		while (bo->proxy) {
			DBG(("%s: adding proxy [delta=%d] for handle=%d\n",
			     __FUNCTION__, bo->delta, bo->handle));
			delta += bo->delta;
			assert(bo->handle == bo->proxy->handle);
			/* need to release the cache upon batch submit */
			if (bo->exec == NULL) {
				list_move_tail(&bo->request,
					       &kgem->next_request->buffers);
				bo->rq = MAKE_REQUEST(kgem->next_request,
						      kgem->ring);
				bo->exec = &_kgem_dummy_exec;
			}

			if (read_write_domain & 0x7fff && !bo->dirty)
				__kgem_bo_mark_dirty(bo);

			bo = bo->proxy;
			assert(bo->refcnt);
			assert(!bo->purged);
		}

		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);
		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
		assert(RQ_RING(bo->rq) == kgem->ring);

		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
			if (bo->tiling &&
			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
				assert(kgem->nfence < kgem->fence_max);
				kgem->aperture_fenced +=
					kgem_bo_fenced_size(kgem, bo);
				kgem->nfence++;
			}
			bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
		}

		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = bo->target_handle;
		kgem->reloc[index].presumed_offset = bo->presumed_offset;

		if (read_write_domain & 0x7fff && !bo->dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}

		delta += bo->presumed_offset;
	} else {
		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = ~0U;
		kgem->reloc[index].presumed_offset = 0;
		if (kgem->nreloc__self < 256)
			kgem->reloc__self[kgem->nreloc__self++] = index;
	}
	kgem->reloc[index].read_domains = read_write_domain >> 16;
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;

	return delta;
}

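/* Illustrative only: a minimal sketch of how a blitter command might
 * record its target through kgem_add_reloc(). EXAMPLE_BLT_OPCODE is a
 * placeholder, not a value taken from this file.
 */
#if 0
static void example_emit_blt_dst(struct kgem *kgem, struct kgem_bo *dst)
{
	uint32_t *b = kgem->batch + kgem->nbatch;

	b[0] = EXAMPLE_BLT_OPCODE;	/* hypothetical command dword */
	/* Dword 1 receives the presumed GPU address of dst; the kernel
	 * patches it via the relocation entry recorded here.
	 */
	b[1] = kgem_add_reloc(kgem, kgem->nbatch + 1, dst,
			      I915_GEM_DOMAIN_RENDER << 16 |
			      I915_GEM_DOMAIN_RENDER |
			      KGEM_RELOC_FENCED,
			      0);
	kgem->nbatch += 2;
}
#endif
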
static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
{
	int i, j;

	DBG(("%s: type=%d, count=%d (bucket: %d)\n",
	     __FUNCTION__, type, kgem->vma[type].count, bucket));
	if (kgem->vma[type].count <= 0)
		return;

	if (kgem->need_purge)
		kgem_purge_cache(kgem);

	/* vma are limited on a per-process basis to around 64k.
	 * This includes all malloc arenas as well as other file
	 * mappings. In order to be fair and not hog the cache,
	 * and more importantly not to exhaust that limit and to
	 * start failing mappings, we keep our own number of open
	 * vma to within a conservative value.
	 */
	i = 0;
	while (kgem->vma[type].count > 0) {
		struct kgem_bo *bo = NULL;

		for (j = 0;
		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
		     j++) {
			struct list *head = &kgem->vma[type].inactive[i++%ARRAY_SIZE(kgem->vma[type].inactive)];
			if (!list_is_empty(head))
				bo = list_last_entry(head, struct kgem_bo, vma);
		}
		if (bo == NULL)
			break;

		DBG(("%s: discarding inactive %s vma cache for %d\n",
		     __FUNCTION__,
		     IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle));
		assert(IS_CPU_MAP(bo->map) == type);
		assert(bo->map);
		assert(bo->rq == NULL);

		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
//		munmap(MAP(bo->map), bytes(bo));
		bo->map = NULL;
		list_del(&bo->vma);
		kgem->vma[type].count--;

		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo)) {
			DBG(("%s: freeing unpurgeable old mapping\n",
			     __FUNCTION__));
			kgem_bo_free(kgem, bo);
		}
	}
}

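/* Map a bo for direct access, preferring a cheap CPU mapping over a
 * GTT mapping whenever the surface is linear and coherent (LLC or
 * already in the CPU domain); otherwise fall back to a cached GTT vma
 * and move the bo into the GTT domain.
 */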
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->proxy == NULL);
	assert(list_is_empty(&bo->list));
	assert(bo->exec == NULL);

	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
		DBG(("%s: converting request for GTT map into CPU map\n",
		     __FUNCTION__));
		ptr = kgem_bo_map__cpu(kgem, bo);
		kgem_bo_sync__cpu(kgem, bo);
		return ptr;
	}

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	if (bo->domain != DOMAIN_GTT) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		/* XXX use PROT_READ to avoid the write flush? */

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_GTT;
		}
	}

	return ptr;
}

void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->list));

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(bytes(bo) <= kgem->aperture_mappable / 4);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	return ptr;
}

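/* Map a bo into the CPU domain via DRM_IOCTL_I915_GEM_MMAP, retiring
 * and expiring caches on failure before giving up. Unlike kgem_bo_map(),
 * this never touches the GTT and performs no domain synchronisation;
 * pair it with kgem_bo_sync__cpu() before reading or writing.
 */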
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap mmap_arg;

	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
	assert(!bo->purged);
	assert(list_is_empty(&bo->list));
	assert(!bo->scanout);
	assert(bo->proxy == NULL);

	if (IS_CPU_MAP(bo->map))
		return MAP(bo->map);

	if (bo->map)
		kgem_bo_release_map(kgem, bo);

	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));

retry:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	mmap_arg.offset = 0;
	mmap_arg.size = bytes(bo);
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
		printf("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
		       __FUNCTION__, bo->handle, bytes(bo), 0);
		if (__kgem_throttle_retire(kgem, 0))
			goto retry;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry;
		}

		return NULL;
	}

	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));

	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}

void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	kgem_bo_submit(kgem, bo);

	if (bo->domain != DOMAIN_CPU) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: SYNC: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;

		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_CPU;
		}
	}
}

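/* Illustrative only: a minimal sketch of the map-and-sync pattern for
 * writing pixel data with the CPU. The source pointer and length are
 * hypothetical.
 */
#if 0
static bool example_upload(struct kgem *kgem, struct kgem_bo *bo,
			   const void *src, size_t len)
{
	void *dst;

	dst = kgem_bo_map__cpu(kgem, bo);	/* cached CPU vma */
	if (dst == NULL)
		return false;

	/* Wait for the GPU and move the bo into the CPU write domain
	 * before dirtying the pages.
	 */
	kgem_bo_sync__cpu(kgem, bo);
	memcpy(dst, src, len);
	return true;
}
#endif
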
void kgem_clear_dirty(struct kgem *kgem)
{
	struct list * const buffers = &kgem->next_request->buffers;
	struct kgem_bo *bo;

	list_for_each_entry(bo, buffers, request) {
		if (!bo->dirty)
			break;

		bo->dirty = false;
	}
}

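/* Create a lightweight view into an existing bo: the proxy borrows the
 * target's handle, tiling and pitch, and addresses the sub-range
 * [offset, offset+length) through its relocation delta.
 */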
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length)
{
	struct kgem_bo *bo;

	DBG(("%s: target handle=%d [proxy? %d], offset=%d, length=%d, io=%d\n",
	     __FUNCTION__, target->handle, target->proxy ? target->proxy->delta : -1,
	     offset, length, target->io));

	bo = __kgem_bo_alloc(target->handle, length);
	if (bo == NULL)
		return NULL;

	bo->unique_id = kgem_get_unique_id(kgem);
	bo->reusable = false;
	bo->size.bytes = length;

	bo->io = target->io && target->proxy == NULL;
	bo->dirty = target->dirty;
	bo->tiling = target->tiling;
	bo->pitch = target->pitch;

	assert(!bo->scanout);
	bo->proxy = kgem_bo_reference(target);
	bo->delta = offset;

	if (target->exec) {
		list_move_tail(&bo->request, &kgem->next_request->buffers);
		bo->exec = &_kgem_dummy_exec;
	}
	bo->rq = target->rq;

	return bo;
}

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b && b->offset; b = b->next)
		if (format == b->format)
			return b->offset;

	return 0;
}

void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b; b = b->next) {
		if (b->offset)
			continue;

		b->offset = offset;
		b->format = format;

		if (b->next)
			b->next->offset = 0;

		return;
	}

	b = malloc(sizeof(*b));
	if (b) {
		b->next = bo->binding.next;
		b->format = format;
		b->offset = offset;
		bo->binding.next = b;
	}
}

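/* KolibriOS-specific: query the system framebuffer via the SRV_FBINFO
 * ioctl and wrap it in a bo with the magic handle -2, which
 * kgem_add_reloc() recognises as needing no relocation entry.
 */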
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb)
{
	struct kgem_bo *bo;
	size_t size;
	int ret;

	ret = drmIoctl(kgem->fd, SRV_FBINFO, fb);
	if (ret != 0)
		return 0;

	size = fb->pitch * fb->height / PAGE_SIZE;

	bo = __kgem_bo_alloc(-2, size);
	if (!bo)
		return 0;

	bo->domain = DOMAIN_GTT;
	bo->unique_id = kgem_get_unique_id(kgem);
	bo->pitch = fb->pitch;
	bo->tiling = I915_TILING_NONE;
	bo->scanout = 1;
	fb->fb_bo = bo;

//	printf("fb width %d height %d pitch %d bo %p\n",
//	       fb->width, fb->height, fb->pitch, fb->fb_bo);

	return 1;
}

int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb)
{
	struct kgem_bo *bo;
	size_t size;
	int ret;

	bo = fb->fb_bo;

	ret = drmIoctl(kgem->fd, SRV_FBINFO, fb);
	if (ret != 0)
		return 0;

	fb->fb_bo = bo;

	size = fb->pitch * fb->height / PAGE_SIZE;

	if ((size != bo->size.pages.count) ||
	    (fb->pitch != bo->pitch)) {
		bo->size.pages.count = size;
		bo->pitch = fb->pitch;

		printf("fb width %d height %d pitch %d bo %p\n",
		       fb->width, fb->height, fb->pitch, fb->fb_bo);

		return 1;
	}

	return 0;
}

void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	kgem_bo_destroy(kgem, bo);
	kgem_bo_free(kgem, bo);
}

void kgem_close_batches(struct kgem *kgem)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
		while (!list_is_empty(&kgem->pinned_batches[n])) {
			kgem_bo_destroy(kgem,
					list_first_entry(&kgem->pinned_batches[n],
							 struct kgem_bo, list));
		}
	}
}