/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "sna.h"
#include "sna_reg.h"

unsigned int cpu_cache_size(void);

static struct kgem_bo *
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

static struct kgem_bo *
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);

#define DBG_NO_HW 0
#define DBG_NO_TILING 1
#define DBG_NO_CACHE 0
#define DBG_NO_CACHE_LEVEL 0
#define DBG_NO_CPU 0
#define DBG_NO_USERPTR 0
#define DBG_NO_LLC 0
#define DBG_NO_SEMAPHORES 0
#define DBG_NO_MADV 1
#define DBG_NO_UPLOAD_CACHE 0
#define DBG_NO_UPLOAD_ACTIVE 0
#define DBG_NO_MAP_UPLOAD 0
#define DBG_NO_RELAXED_FENCING 0
#define DBG_NO_SECURE_BATCHES 0
#define DBG_NO_PINNED_BATCHES 0
#define DBG_NO_FAST_RELOC 0
#define DBG_NO_HANDLE_LUT 0
#define DBG_DUMP 0

#ifndef DEBUG_SYNC
#define DEBUG_SYNC 0
#endif

#define SHOW_BATCH 0

#if 0
#define ASSERT_IDLE(kgem__, handle__) assert(!__kgem_busy(kgem__, handle__))
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__) assert(!(expect__) || !__kgem_busy(kgem__, handle__))
#else
#define ASSERT_IDLE(kgem__, handle__)
#define ASSERT_MAYBE_IDLE(kgem__, handle__, expect__)
#endif

/* The worst case seems to be 965gm, where we cannot write within a cacheline
 * that is simultaneously being read by the GPU, or within the sampler
 * prefetch. In general, the chipsets seem to have a requirement that sampler
 * offsets be aligned to a cacheline (64 bytes).
 */
#define UPLOAD_ALIGNMENT 128

#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)

#define MAX_GTT_VMA_CACHE 512
#define MAX_CPU_VMA_CACHE INT16_MAX
#define MAP_PRESERVE_TIME 10

#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
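
/* Map pointers are tagged in their low two bits (mmaps are page-aligned, so
 * those bits are otherwise zero): 0 marks a GTT mapping, 1 a CPU mapping and
 * 3 a userptr mapping. MAP() strips the tag to recover the real address, so
 * MAP(MAKE_CPU_MAP(p)) == p for any page-aligned p.
 */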

#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))

#define LOCAL_I915_PARAM_HAS_BLT 11
#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING 12
#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA 15
#define LOCAL_I915_PARAM_HAS_SEMAPHORES 20
#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES 23
#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES 24
#define LOCAL_I915_PARAM_HAS_NO_RELOC 25
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT 26

#define LOCAL_I915_EXEC_IS_PINNED (1<<10)
#define LOCAL_I915_EXEC_NO_RELOC (1<<11)
#define LOCAL_I915_EXEC_HANDLE_LUT (1<<12)
struct local_i915_gem_userptr {
    uint64_t user_ptr;
    uint32_t user_size;
    uint32_t flags;
#define I915_USERPTR_READ_ONLY (1<<0)
#define I915_USERPTR_UNSYNCHRONIZED (1<<31)
    uint32_t handle;
};

#define UNCACHED 0
#define SNOOPED 1

struct local_i915_gem_cacheing {
    uint32_t handle;
    uint32_t cacheing;
};

#define LOCAL_IOCTL_I915_GEM_SET_CACHEING SRV_I915_GEM_SET_CACHEING

struct local_fbinfo {
    int width;
    int height;
    int pitch;
    int tiling;
};

struct kgem_buffer {
    struct kgem_bo base;
    void *mem;
    uint32_t used;
    uint32_t need_io : 1;
    uint32_t write : 2;
    uint32_t mmapped : 1;
};

static struct kgem_bo *__kgem_freed_bo;
static struct kgem_request *__kgem_freed_request;
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;

static inline int bytes(struct kgem_bo *bo)
{
    return __kgem_bo_size(bo);
}

#define bucket(B) (B)->size.pages.bucket
#define num_pages(B) (B)->size.pages.count

#ifdef DEBUG_MEMORY
static void debug_alloc(struct kgem *kgem, size_t size)
{
    kgem->debug_memory.bo_allocs++;
    kgem->debug_memory.bo_bytes += size;
}
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
{
    debug_alloc(kgem, bytes(bo));
}
#else
#define debug_alloc(k, b)
#define debug_alloc__bo(k, b)
#endif

static void kgem_sna_reset(struct kgem *kgem)
{
    struct sna *sna = container_of(kgem, struct sna, kgem);

    sna->render.reset(sna);
    sna->blt_state.fill_bo = 0;
}

static void kgem_sna_flush(struct kgem *kgem)
{
    struct sna *sna = container_of(kgem, struct sna, kgem);

    sna->render.flush(sna);

//  if (sna->render.solid_cache.dirty)
//      sna_render_flush_solid(sna);
}

static bool gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
{
    struct drm_i915_gem_set_tiling set_tiling;
    int ret;

    if (DBG_NO_TILING)
        return false;
/*
    VG_CLEAR(set_tiling);
    do {
        set_tiling.handle = handle;
        set_tiling.tiling_mode = tiling;
        set_tiling.stride = stride;

        ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
*/
    return false; //ret == 0;
}

static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
{
    struct local_i915_gem_cacheing arg;

    VG_CLEAR(arg);
    arg.handle = handle;
    arg.cacheing = cacheing;
    return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
}

static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
{
    if (flags & CREATE_NO_RETIRE) {
        DBG(("%s: not retiring per-request\n", __FUNCTION__));
        return false;
    }

    if (!kgem->need_retire) {
        DBG(("%s: nothing to retire\n", __FUNCTION__));
        return false;
    }

    if (kgem_retire(kgem))
        return true;

    if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
        DBG(("%s: not throttling\n", __FUNCTION__));
        return false;
    }

    kgem_throttle(kgem);
    return kgem_retire(kgem);
}

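/* Note: in this port the GTT mmap path below is stubbed out -- the mmap()
 * call is commented away, so even when the MMAP_GTT ioctl succeeds the
 * function reports the failure and returns NULL.
 */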
static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
    struct drm_i915_gem_mmap_gtt mmap_arg;
    void *ptr;

    DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
         bo->handle, bytes(bo)));
    assert(bo->proxy == NULL);

retry_gtt:
    VG_CLEAR(mmap_arg);
    mmap_arg.handle = bo->handle;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
        printf("%s: failed to retrieve GTT offset for handle=%d: %d\n",
               __FUNCTION__, bo->handle, 0);
        (void)__kgem_throttle_retire(kgem, 0);
        if (kgem_expire_cache(kgem))
            goto retry_gtt;

        if (kgem->need_expire) {
            kgem_cleanup_cache(kgem);
            goto retry_gtt;
        }

        return NULL;
    }

retry_mmap:
//  ptr = mmap(0, bytes(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
//             kgem->fd, mmap_arg.offset);
//  if (ptr == 0) {
        printf("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
               __FUNCTION__, bo->handle, bytes(bo), 0);
//      if (__kgem_throttle_retire(kgem, 0))
//          goto retry_mmap;

//      if (kgem->need_expire) {
//          kgem_cleanup_cache(kgem);
//          goto retry_mmap;
//      }

        ptr = NULL;
//  }

    return ptr;
}

static int __gem_write(int fd, uint32_t handle,
                       int offset, int length,
                       const void *src)
{
    struct drm_i915_gem_pwrite pwrite;

    DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
         handle, offset, length));

    VG_CLEAR(pwrite);
    pwrite.handle = handle;
    pwrite.offset = offset;
    pwrite.size = length;
    pwrite.data_ptr = (uintptr_t)src;
    return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}

static int gem_write(int fd, uint32_t handle,
                     int offset, int length,
                     const void *src)
{
    struct drm_i915_gem_pwrite pwrite;

    DBG(("%s(handle=%d, offset=%d, len=%d)\n", __FUNCTION__,
         handle, offset, length));

    VG_CLEAR(pwrite);
    pwrite.handle = handle;
    /* align the transfer to cachelines; fortuitously this is safe! */
    if ((offset | length) & 63) {
        pwrite.offset = offset & ~63;
        pwrite.size = ALIGN(offset+length, 64) - pwrite.offset;
        pwrite.data_ptr = (uintptr_t)src + pwrite.offset - offset;
    } else {
        pwrite.offset = offset;
        pwrite.size = length;
        pwrite.data_ptr = (uintptr_t)src;
    }
    return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}
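
/* Worked example for the cacheline widening above: a 100-byte write at
 * offset 70 becomes pwrite.offset = 64, pwrite.size = ALIGN(170, 64) - 64 =
 * 128 and pwrite.data_ptr = src - 6, i.e. the transfer is grown to whole
 * 64-byte cachelines and the source pointer rewound to match -- the
 * "fortuitously safe" part is that the surrounding allocation tolerates
 * reading those few extra bytes.
 */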

bool __kgem_busy(struct kgem *kgem, int handle)
{
    struct drm_i915_gem_busy busy;

    VG_CLEAR(busy);
    busy.handle = handle;
    busy.busy = !kgem->wedged;
    (void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
    DBG(("%s: handle=%d, busy=%d, wedged=%d\n",
         __FUNCTION__, handle, busy.busy, kgem->wedged));

    return busy.busy;
}

static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
{
    DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d [busy?=%d]\n",
         __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL,
         __kgem_busy(kgem, bo->handle)));
    assert(bo->exec == NULL);
    assert(list_is_empty(&bo->vma));

    if (bo->rq) {
        if (!__kgem_busy(kgem, bo->handle)) {
            __kgem_bo_clear_busy(bo);
            kgem_retire(kgem);
        }
    } else {
        assert(!bo->needs_flush);
        ASSERT_IDLE(kgem, bo->handle);
    }
}

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
                   const void *data, int length)
{
    assert(bo->refcnt);
    assert(!bo->purged);
    assert(bo->proxy == NULL);
    ASSERT_IDLE(kgem, bo->handle);

    assert(length <= bytes(bo));
    if (gem_write(kgem->fd, bo->handle, 0, length, data))
        return false;

    DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
    if (bo->exec == NULL) {
        kgem_bo_retire(kgem, bo);
        bo->domain = DOMAIN_NONE;
    }
    return true;
}

static uint32_t gem_create(int fd, int num_pages)
{
    struct drm_i915_gem_create create;

    VG_CLEAR(create);
    create.handle = 0;
    create.size = PAGE_SIZE * num_pages;
    (void)drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

    return create.handle;
}

static bool
kgem_bo_set_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
    return true;
#else
    struct drm_i915_gem_madvise madv;

    assert(bo->exec == NULL);
    assert(!bo->purged);

    VG_CLEAR(madv);
    madv.handle = bo->handle;
    madv.madv = I915_MADV_DONTNEED;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
        bo->purged = 1;
        kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
        return madv.retained;
    }

    return true;
#endif
}

static bool
kgem_bo_is_retained(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
    return true;
#else
    struct drm_i915_gem_madvise madv;

    if (!bo->purged)
        return true;

    VG_CLEAR(madv);
    madv.handle = bo->handle;
    madv.madv = I915_MADV_DONTNEED;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0)
        return madv.retained;

    return false;
#endif
}

static bool
kgem_bo_clear_purgeable(struct kgem *kgem, struct kgem_bo *bo)
{
#if DBG_NO_MADV
    return true;
#else
    struct drm_i915_gem_madvise madv;

    assert(bo->purged);

    VG_CLEAR(madv);
    madv.handle = bo->handle;
    madv.madv = I915_MADV_WILLNEED;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv) == 0) {
        bo->purged = !madv.retained;
        kgem->need_purge |= !madv.retained && bo->domain == DOMAIN_GPU;
        return madv.retained;
    }

    return false;
#endif
}
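
/* The madvise trio above implements purgeable caching: DONTNEED lets the
 * kernel reclaim a cached bo's pages under memory pressure, WILLNEED takes
 * them back before reuse, and madv.retained reports whether the backing
 * storage survived in the meantime.
 */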

static void gem_close(int fd, uint32_t handle)
{
    struct drm_gem_close close;

    VG_CLEAR(close);
    close.handle = handle;
    (void)drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
}

constant inline static unsigned long __fls(unsigned long word)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
    asm("bsr %1,%0"
        : "=r" (word)
        : "rm" (word));
    return word;
#else
    unsigned int v = 0;

    while (word >>= 1)
        v++;

    return v;
#endif
}

constant inline static int cache_bucket(int num_pages)
{
    return __fls(num_pages);
}
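
/* Buckets are keyed by floor(log2(num_pages)): cache_bucket(1) == 0,
 * cache_bucket(9) == 3, so bucket N collects bos of 2^N up to 2^(N+1)-1
 * pages.
 */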

static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
                                      int handle, int num_pages)
{
    assert(num_pages);
    memset(bo, 0, sizeof(*bo));

    bo->refcnt = 1;
    bo->handle = handle;
    bo->target_handle = -1;
    num_pages(bo) = num_pages;
    bucket(bo) = cache_bucket(num_pages);
    bo->reusable = true;
    bo->domain = DOMAIN_CPU;
    list_init(&bo->request);
    list_init(&bo->list);
    list_init(&bo->vma);

    return bo;
}

static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
{
    struct kgem_bo *bo;

    if (__kgem_freed_bo) {
        bo = __kgem_freed_bo;
        __kgem_freed_bo = *(struct kgem_bo **)bo;
    } else {
        bo = malloc(sizeof(*bo));
        if (bo == NULL)
            return NULL;
    }

    return __kgem_bo_init(bo, handle, num_pages);
}

static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
{
    struct kgem_request *rq;

    rq = __kgem_freed_request;
    if (rq) {
        __kgem_freed_request = *(struct kgem_request **)rq;
    } else {
        rq = malloc(sizeof(*rq));
        if (rq == NULL)
            rq = &kgem->static_request;
    }

    list_init(&rq->buffers);
    rq->bo = NULL;
    rq->ring = 0;

    return rq;
}

static void __kgem_request_free(struct kgem_request *rq)
{
    _list_del(&rq->list);
    *(struct kgem_request **)rq = __kgem_freed_request;
    __kgem_freed_request = rq;
}

static struct list *inactive(struct kgem *kgem, int num_pages)
{
    assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
    assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
    return &kgem->inactive[cache_bucket(num_pages)];
}

static struct list *active(struct kgem *kgem, int num_pages, int tiling)
{
    assert(num_pages < MAX_CACHE_SIZE / PAGE_SIZE);
    assert(cache_bucket(num_pages) < NUM_CACHE_BUCKETS);
    return &kgem->active[cache_bucket(num_pages)][tiling];
}
577 | } |
578 | 578 | ||
579 | static size_t |
579 | static size_t |
580 | agp_aperture_size(struct pci_device *dev, unsigned gen) |
580 | agp_aperture_size(struct pci_device *dev, unsigned gen) |
581 | { |
581 | { |
582 | /* XXX assume that only future chipsets are unknown and follow |
582 | /* XXX assume that only future chipsets are unknown and follow |
583 | * the post gen2 PCI layout. |
583 | * the post gen2 PCI layout. |
584 | */ |
584 | */ |
585 | // return dev->regions[gen < 030 ? 0 : 2].size; |
585 | // return dev->regions[gen < 030 ? 0 : 2].size; |
586 | 586 | ||
587 | return 0; |
587 | return 0; |
588 | } |
588 | } |
589 | 589 | ||
590 | static size_t |
590 | static size_t |
591 | total_ram_size(void) |
591 | total_ram_size(void) |
592 | { |
592 | { |
593 | uint32_t data[9]; |
593 | uint32_t data[9]; |
594 | size_t size = 0; |
594 | size_t size = 0; |
595 | 595 | ||
596 | asm volatile("int $0x40" |
596 | asm volatile("int $0x40" |
597 | : "=a" (size) |
597 | : "=a" (size) |
598 | : "a" (18),"b"(20), "c" (data) |
598 | : "a" (18),"b"(20), "c" (data) |
599 | : "memory"); |
599 | : "memory"); |
600 | 600 | ||
601 | return size != -1 ? size : 0; |
601 | return size != -1 ? size : 0; |
602 | } |
602 | } |
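
/* total_ram_size() relies on a KolibriOS-style system call (int 0x40,
 * function 18, subfunction 20) filling `data` with memory statistics and
 * returning the RAM size in eax, or -1 on failure; the exact subfunction
 * semantics are assumed from the calling convention visible here.
 */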

static int gem_param(struct kgem *kgem, int name)
{
    drm_i915_getparam_t gp;
    int v = -1; /* No param uses the sign bit, reserve it for errors */

    VG_CLEAR(gp);
    gp.param = name;
    gp.value = &v;
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp))
        return -1;

    VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
    return v;
}
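
/* Typical use: gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) returns the
 * kernel's answer (>0 when the feature is present, 0 when absent) or -1 if
 * the ioctl fails or the parameter is unknown to the kernel.
 */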

static bool test_has_execbuffer2(struct kgem *kgem)
{
    return true;
}

static bool test_has_no_reloc(struct kgem *kgem)
{
    if (DBG_NO_FAST_RELOC)
        return false;

    return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
}

static bool test_has_handle_lut(struct kgem *kgem)
{
    if (DBG_NO_HANDLE_LUT)
        return false;

    return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
}

static bool test_has_semaphores_enabled(struct kgem *kgem)
{
    bool detected = false;
    int ret;

    if (DBG_NO_SEMAPHORES)
        return false;

    ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
    if (ret != -1)
        return ret > 0;

    return detected;
}

static bool __kgem_throttle(struct kgem *kgem)
{
    if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
        return false;

    return errno == EIO;
}

static bool is_hw_supported(struct kgem *kgem,
                            struct pci_device *dev)
{
    if (DBG_NO_HW)
        return false;

    if (!test_has_execbuffer2(kgem))
        return false;

    if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
        return kgem->has_blt;

    /* Although the GMCH is fubar pre-855gm, it mostly works. So let the
     * user decide through "NoAccel" whether or not to risk hw acceleration.
     */

    if (kgem->gen == 060 && dev->revision < 8) {
        /* pre-production SNB with dysfunctional BLT */
        return false;
    }

    if (kgem->gen >= 060) /* Only if the kernel supports the BLT ring */
        return kgem->has_blt;

    return true;
}
690 | } |
691 | 691 | ||
692 | static bool test_has_relaxed_fencing(struct kgem *kgem) |
692 | static bool test_has_relaxed_fencing(struct kgem *kgem) |
693 | { |
693 | { |
694 | if (kgem->gen < 040) { |
694 | if (kgem->gen < 040) { |
695 | if (DBG_NO_RELAXED_FENCING) |
695 | if (DBG_NO_RELAXED_FENCING) |
696 | return false; |
696 | return false; |
697 | 697 | ||
698 | return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0; |
698 | return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0; |
699 | } else |
699 | } else |
700 | return true; |
700 | return true; |
701 | } |
701 | } |
702 | 702 | ||
703 | static bool test_has_llc(struct kgem *kgem) |
703 | static bool test_has_llc(struct kgem *kgem) |
704 | { |
704 | { |
705 | int has_llc = -1; |
705 | int has_llc = -1; |
706 | 706 | ||
707 | if (DBG_NO_LLC) |
707 | if (DBG_NO_LLC) |
708 | return false; |
708 | return false; |
709 | 709 | ||
710 | #if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */ |
710 | #if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */ |
711 | has_llc = gem_param(kgem, I915_PARAM_HAS_LLC); |
711 | has_llc = gem_param(kgem, I915_PARAM_HAS_LLC); |
712 | #endif |
712 | #endif |
713 | if (has_llc == -1) { |
713 | if (has_llc == -1) { |
714 | DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__)); |
714 | DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__)); |
715 | has_llc = kgem->gen >= 060; |
715 | has_llc = kgem->gen >= 060; |
716 | } |
716 | } |
717 | 717 | ||
718 | return has_llc; |
718 | return has_llc; |
719 | } |
719 | } |

static bool test_has_cacheing(struct kgem *kgem)
{
    uint32_t handle;
    bool ret;

    if (DBG_NO_CACHE_LEVEL)
        return false;

    /* Incoherent blt and sampler hangs the GPU */
    if (kgem->gen == 040)
        return false;

    handle = gem_create(kgem->fd, 1);
    if (handle == 0)
        return false;

    ret = gem_set_cacheing(kgem->fd, handle, UNCACHED);
    gem_close(kgem->fd, handle);
    return ret;
}

static bool test_has_userptr(struct kgem *kgem)
{
#if defined(USE_USERPTR)
    uint32_t handle;
    void *ptr;

    if (DBG_NO_USERPTR)
        return false;

    /* Incoherent blt and sampler hangs the GPU */
    if (kgem->gen == 040)
        return false;

    ptr = malloc(PAGE_SIZE);
    handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
    gem_close(kgem->fd, handle);
    free(ptr);

    return handle != 0;
#else
    return false;
#endif
}

static bool test_has_secure_batches(struct kgem *kgem)
{
    if (DBG_NO_SECURE_BATCHES)
        return false;

    return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
}

static bool test_has_pinned_batches(struct kgem *kgem)
{
    if (DBG_NO_PINNED_BATCHES)
        return false;

    return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
}

static bool kgem_init_pinned_batches(struct kgem *kgem)
{
    int count[2] = { 4, 2 };
    int size[2] = { 1, 4 };
    int n, i;
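
    /* Two pools of pinned batch buffers, sized in pages: four single-page
     * batches and two 4-page batches; gen2 depends on these allocations
     * succeeding (see kgem_init). */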

    if (kgem->wedged)
        return true;

    for (n = 0; n < ARRAY_SIZE(count); n++) {
        for (i = 0; i < count[n]; i++) {
            struct drm_i915_gem_pin pin;
            struct kgem_bo *bo;

            VG_CLEAR(pin);

            pin.handle = gem_create(kgem->fd, size[n]);
            if (pin.handle == 0)
                goto err;

            DBG(("%s: new handle=%d, num_pages=%d\n",
                 __FUNCTION__, pin.handle, size[n]));

            bo = __kgem_bo_alloc(pin.handle, size[n]);
            if (bo == NULL) {
                gem_close(kgem->fd, pin.handle);
                goto err;
            }

            pin.alignment = 0;
            if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_PIN, &pin)) {
                gem_close(kgem->fd, pin.handle);
                goto err;
            }
            bo->presumed_offset = pin.offset;
            debug_alloc__bo(kgem, bo);
            list_add(&bo->list, &kgem->pinned_batches[n]);
        }
    }

    return true;

err:
    for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
        while (!list_is_empty(&kgem->pinned_batches[n])) {
            kgem_bo_destroy(kgem,
                            list_first_entry(&kgem->pinned_batches[n],
                                             struct kgem_bo, list));
        }
    }

    /* For simplicity populate the lists with a single unpinned bo */
    for (n = 0; n < ARRAY_SIZE(count); n++) {
        struct kgem_bo *bo;
        uint32_t handle;

        handle = gem_create(kgem->fd, size[n]);
        if (handle == 0)
            break;

        bo = __kgem_bo_alloc(handle, size[n]);
        if (bo == NULL) {
            gem_close(kgem->fd, handle);
            break;
        }

        debug_alloc__bo(kgem, bo);
        list_add(&bo->list, &kgem->pinned_batches[n]);
    }
    return false;
}
854 | 854 | ||
855 | void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen) |
855 | void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen) |
856 | { |
856 | { |
857 | struct drm_i915_gem_get_aperture aperture; |
857 | struct drm_i915_gem_get_aperture aperture; |
858 | size_t totalram; |
858 | size_t totalram; |
859 | unsigned half_gpu_max; |
859 | unsigned half_gpu_max; |
860 | unsigned int i, j; |
860 | unsigned int i, j; |
861 | 861 | ||
862 | DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen)); |
862 | DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen)); |
863 | 863 | ||
864 | memset(kgem, 0, sizeof(*kgem)); |
864 | memset(kgem, 0, sizeof(*kgem)); |
865 | 865 | ||
866 | kgem->fd = fd; |
866 | kgem->fd = fd; |
867 | kgem->gen = gen; |
867 | kgem->gen = gen; |
868 | 868 | ||
869 | list_init(&kgem->requests[0]); |
869 | list_init(&kgem->requests[0]); |
870 | list_init(&kgem->requests[1]); |
870 | list_init(&kgem->requests[1]); |
871 | list_init(&kgem->batch_buffers); |
871 | list_init(&kgem->batch_buffers); |
872 | list_init(&kgem->active_buffers); |
872 | list_init(&kgem->active_buffers); |
873 | list_init(&kgem->flushing); |
873 | list_init(&kgem->flushing); |
874 | list_init(&kgem->large); |
874 | list_init(&kgem->large); |
875 | list_init(&kgem->large_inactive); |
875 | list_init(&kgem->large_inactive); |
876 | list_init(&kgem->snoop); |
876 | list_init(&kgem->snoop); |
877 | list_init(&kgem->scanout); |
877 | list_init(&kgem->scanout); |
878 | for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++) |
878 | for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++) |
879 | list_init(&kgem->pinned_batches[i]); |
879 | list_init(&kgem->pinned_batches[i]); |
880 | for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) |
880 | for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) |
881 | list_init(&kgem->inactive[i]); |
881 | list_init(&kgem->inactive[i]); |
882 | for (i = 0; i < ARRAY_SIZE(kgem->active); i++) { |
882 | for (i = 0; i < ARRAY_SIZE(kgem->active); i++) { |
883 | for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++) |
883 | for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++) |
884 | list_init(&kgem->active[i][j]); |
884 | list_init(&kgem->active[i][j]); |
885 | } |
885 | } |
886 | for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) { |
886 | for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) { |
887 | for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++) |
887 | for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++) |
888 | list_init(&kgem->vma[i].inactive[j]); |
888 | list_init(&kgem->vma[i].inactive[j]); |
889 | } |
889 | } |
890 | kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE; |
890 | kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE; |
891 | kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE; |
891 | kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE; |
892 | 892 | ||
893 | kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0; |
893 | kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0; |
894 | DBG(("%s: has BLT ring? %d\n", __FUNCTION__, |
	DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
	     kgem->has_blt));

	kgem->has_relaxed_delta =
		gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
	     kgem->has_relaxed_delta));

	kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
	     kgem->has_relaxed_fencing));

	kgem->has_llc = test_has_llc(kgem);
	DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
	     kgem->has_llc));

	kgem->has_cacheing = test_has_cacheing(kgem);
	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
	     kgem->has_cacheing));

	kgem->has_userptr = test_has_userptr(kgem);
	DBG(("%s: has userptr? %d\n", __FUNCTION__,
	     kgem->has_userptr));

	kgem->has_no_reloc = test_has_no_reloc(kgem);
	DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
	     kgem->has_no_reloc));

	kgem->has_handle_lut = test_has_handle_lut(kgem);
	DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
	     kgem->has_handle_lut));

	kgem->has_semaphores = false;
	if (kgem->has_blt && test_has_semaphores_enabled(kgem))
		kgem->has_semaphores = true;
	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
	     kgem->has_semaphores));

	kgem->can_blt_cpu = gen >= 030;
	DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
	     kgem->can_blt_cpu));

	kgem->has_secure_batches = test_has_secure_batches(kgem);
	DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
	     kgem->has_secure_batches));

	kgem->has_pinned_batches = test_has_pinned_batches(kgem);
	DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
	     kgem->has_pinned_batches));

	if (!is_hw_supported(kgem, dev)) {
		printf("Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
		kgem->wedged = 1;
	} else if (__kgem_throttle(kgem)) {
		printf("Detected a hung GPU, disabling acceleration.\n");
		kgem->wedged = 1;
	}

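	/* Pick a batch size the kernel will accept on this generation.
	 * Note that batch_size counts 32-bit dwords, not bytes: gen2 is
	 * limited to what can be pinned (865g to a single page), and
	 * without relaxed delta we conservatively fall back to 4096
	 * dwords (16 KiB). */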
	kgem->batch_size = ARRAY_SIZE(kgem->batch);
	if (gen == 020 && !kgem->has_pinned_batches)
		/* Limited to what we can pin */
		kgem->batch_size = 4*1024;
	if (gen == 022)
		/* 865g cannot handle a batch spanning multiple pages */
		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
	if ((gen >> 3) == 7)
		kgem->batch_size = 16*1024;
	if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
		kgem->batch_size = 4*1024;

	if (!kgem_init_pinned_batches(kgem) && gen == 020) {
		printf("Unable to reserve memory for GPU, disabling acceleration.\n");
		kgem->wedged = 1;
	}

	DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
	     kgem->batch_size));

	kgem->min_alignment = 4;
	if (gen < 040)
		kgem->min_alignment = 64;

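	/* cpu_cache_size() >> 13 is half the CPU cache expressed in
	 * 4 KiB pages (>>12 converts bytes to pages, the extra bit
	 * halves it); it bounds the upload buffer size chosen below. */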
	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
	DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
	     kgem->half_cpu_cache_pages));

	kgem->next_request = __kgem_request_alloc(kgem);

	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
	     !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
	     kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));

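	/* Query the GTT aperture, falling back to a conservative 64 MiB
	 * guess if the ioctl reports nothing. The derived high (3/4) and
	 * low (1/3) watermarks are used later when deciding whether a
	 * batch would overcommit the aperture. */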
	VG_CLEAR(aperture);
	aperture.aper_size = 0;
	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
	if (aperture.aper_size == 0)
		aperture.aper_size = 64*1024*1024;

	DBG(("%s: aperture size %lld, available now %lld\n",
	     __FUNCTION__,
	     (long long)aperture.aper_size,
	     (long long)aperture.aper_available_size));

	kgem->aperture_total = aperture.aper_size;
	kgem->aperture_high = aperture.aper_size * 3/4;
	kgem->aperture_low = aperture.aper_size * 1/3;
	if (gen < 033) {
		/* Severe alignment penalties */
		kgem->aperture_high /= 2;
		kgem->aperture_low /= 2;
	}
	DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
	     kgem->aperture_low, kgem->aperture_low / (1024*1024),
	     kgem->aperture_high, kgem->aperture_high / (1024*1024)));

	kgem->aperture_mappable = agp_aperture_size(dev, gen);
	if (kgem->aperture_mappable == 0 ||
	    kgem->aperture_mappable > aperture.aper_size)
		kgem->aperture_mappable = aperture.aper_size;
	DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
	     kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));

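	/* Size the upload buffer at roughly 1/1024th of the mappable
	 * aperture (rounded up to a power of two), but never larger
	 * than half the CPU cache. */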
	kgem->buffer_size = 64 * 1024;
	while (kgem->buffer_size < kgem->aperture_mappable >> 10)
		kgem->buffer_size *= 2;
	if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
		kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
	DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
	     kgem->buffer_size, kgem->buffer_size / 1024));

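	/* 3 * (aperture_high >> 12) << 10 works out to 3/4 of
	 * aperture_high in bytes; presumably headroom so that no single
	 * object can monopolise the aperture. */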
	kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
	kgem->max_gpu_size = kgem->max_object_size;
	if (!kgem->has_llc)
		kgem->max_gpu_size = MAX_CACHE_SIZE;

	totalram = total_ram_size();
	if (totalram == 0) {
		DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
		     __FUNCTION__));
		totalram = kgem->aperture_total;
	}
	DBG(("%s: total ram=%u\n", __FUNCTION__, totalram));
	if (kgem->max_object_size > totalram / 2)
		kgem->max_object_size = totalram / 2;
	if (kgem->max_gpu_size > totalram / 4)
		kgem->max_gpu_size = totalram / 4;

	kgem->max_cpu_size = kgem->max_object_size;

	half_gpu_max = kgem->max_gpu_size / 2;
	kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
	if (kgem->max_copy_tile_size > half_gpu_max)
		kgem->max_copy_tile_size = half_gpu_max;

	if (kgem->has_llc)
		kgem->max_upload_tile_size = kgem->max_copy_tile_size;
	else
		kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
	if (kgem->max_upload_tile_size > half_gpu_max)
		kgem->max_upload_tile_size = half_gpu_max;

	kgem->large_object_size = MAX_CACHE_SIZE;
	if (kgem->large_object_size > kgem->max_gpu_size)
		kgem->large_object_size = kgem->max_gpu_size;

	if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
		if (kgem->large_object_size > kgem->max_cpu_size)
			kgem->large_object_size = kgem->max_cpu_size;
	} else
		kgem->max_cpu_size = 0;
	if (DBG_NO_CPU)
		kgem->max_cpu_size = 0;

	DBG(("%s: maximum object size=%d\n",
	     __FUNCTION__, kgem->max_object_size));
	DBG(("%s: large object threshold=%d\n",
	     __FUNCTION__, kgem->large_object_size));
	DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
	     __FUNCTION__,
	     kgem->max_gpu_size, kgem->max_cpu_size,
	     kgem->max_upload_tile_size, kgem->max_copy_tile_size));

	/* Convert the aperture thresholds to pages */
	kgem->aperture_low /= PAGE_SIZE;
	kgem->aperture_high /= PAGE_SIZE;

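	/* Keep two fence registers in reserve, presumably for scanout
	 * and other users outside of our control. */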
	kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
	if ((int)kgem->fence_max < 0)
		kgem->fence_max = 5; /* minimum safe value for all hw */
	DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));

	kgem->batch_flags_base = 0;
	if (kgem->has_no_reloc)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
	if (kgem->has_handle_lut)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
	if (kgem->has_pinned_batches)
		kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;
}

/* XXX hopefully a good approximation */
static uint32_t kgem_get_unique_id(struct kgem *kgem)
{
	uint32_t id;
	id = ++kgem->unique_id;
	if (id == 0)
		id = ++kgem->unique_id;
	return id;
}
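
/* Shared buffers need a stricter pitch: 256 bytes for PRIME exports and
 * 64 bytes for scanout, presumably to satisfy the display engine and
 * foreign devices that cannot cope with looser strides. */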
inline static uint32_t kgem_pitch_alignment(struct kgem *kgem, unsigned flags)
{
	if (flags & CREATE_PRIME)
		return 256;
	if (flags & CREATE_SCANOUT)
		return 64;
	return kgem->min_alignment;
}
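
/* For illustration (our own example, not from the original sources): an
 * untiled 101-pixel-wide surface at 32bpp with min_alignment=64 is
 * padded to 102 pixels, i.e. 408 bytes, and the pitch is then rounded
 * up to 448 bytes. */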
static uint32_t kgem_untiled_pitch(struct kgem *kgem,
				   uint32_t width, uint32_t bpp,
				   unsigned flags)
{
	width = ALIGN(width, 2) * bpp >> 3;
	return ALIGN(width, kgem_pitch_alignment(kgem, flags));
}

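/* Tiled surfaces are rounded up to two hardware tile rows (hw tiles are
 * 512 bytes x 8 rows for X and 128 bytes x 32 rows for Y on gen3+, 2 KiB
 * tiles on gen2), hence the doubled tile_height values below; cf. the
 * "align to an even tile row" notes. The hardware tile dimensions are
 * our gloss, not stated in this file. */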
static uint32_t kgem_surface_size(struct kgem *kgem,
				  bool relaxed_fencing,
				  unsigned flags,
				  uint32_t width,
				  uint32_t height,
				  uint32_t bpp,
				  uint32_t tiling,
				  uint32_t *pitch)
{
	uint32_t tile_width, tile_height;
	uint32_t size;

	assert(width <= MAXSHORT);
	assert(height <= MAXSHORT);

	if (kgem->gen <= 030) {
		if (tiling) {
			if (kgem->gen < 030) {
				tile_width = 128;
				tile_height = 32;
			} else {
				tile_width = 512;
				tile_height = 16;
			}
		} else {
			tile_width = 2 * bpp >> 3;
			tile_width = ALIGN(tile_width,
					   kgem_pitch_alignment(kgem, flags));
			tile_height = 2;
		}
	} else switch (tiling) {
	default:
	case I915_TILING_NONE:
		tile_width = 2 * bpp >> 3;
		tile_width = ALIGN(tile_width,
				   kgem_pitch_alignment(kgem, flags));
		tile_height = 2;
		break;

		/* XXX align to an even tile row */
	case I915_TILING_X:
		tile_width = 512;
		tile_height = 16;
		break;
	case I915_TILING_Y:
		tile_width = 128;
		tile_height = 64;
		break;
	}

	*pitch = ALIGN(width * bpp / 8, tile_width);
	height = ALIGN(height, tile_height);
	if (kgem->gen >= 040)
		return PAGE_ALIGN(*pitch * height);

	/* If it is too wide for the blitter, don't even bother. */
	if (tiling != I915_TILING_NONE) {
		if (*pitch > 8192)
			return 0;

		for (size = tile_width; size < *pitch; size <<= 1)
			;
		*pitch = size;
	} else {
		if (*pitch >= 32768)
			return 0;
	}

	size = *pitch * height;
	if (relaxed_fencing || tiling == I915_TILING_NONE)
		return PAGE_ALIGN(size);

	/* We need to allocate a power-of-two fence region for a tiled buffer. */
	if (kgem->gen < 030)
		tile_width = 512 * 1024;
	else
		tile_width = 1024 * 1024;
	while (tile_width < size)
		tile_width *= 2;
	return tile_width;
}

static uint32_t kgem_aligned_height(struct kgem *kgem,
				    uint32_t height, uint32_t tiling)
{
	uint32_t tile_height;

	if (kgem->gen <= 030) {
		tile_height = tiling ? kgem->gen < 030 ? 32 : 16 : 1;
	} else switch (tiling) {
		/* XXX align to an even tile row */
	default:
	case I915_TILING_NONE:
		tile_height = 1;
		break;
	case I915_TILING_X:
		tile_height = 16;
		break;
	case I915_TILING_Y:
		tile_height = 64;
		break;
	}

	return ALIGN(height, tile_height);
}

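/* Each bo submitted with the batch gets a drm_i915_gem_exec_object2
 * slot; with HANDLE_LUT the kernel indexes relocations by slot number
 * rather than by handle, so target_handle records whichever the kernel
 * expects. The aperture counter tracks how full the next request is. */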
static struct drm_i915_gem_exec_object2 *
kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_exec_object2 *exec;

	DBG(("%s: handle=%d, index=%d\n",
	     __FUNCTION__, bo->handle, kgem->nexec));

	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
	bo->target_handle = kgem->has_handle_lut ? kgem->nexec : bo->handle;
	exec = memset(&kgem->exec[kgem->nexec++], 0, sizeof(*exec));
	exec->handle = bo->handle;
	exec->offset = bo->presumed_offset;

	kgem->aperture += num_pages(bo);

	return exec;
}

static void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
	bo->exec = kgem_add_handle(kgem, bo);
	bo->rq = MAKE_REQUEST(kgem->next_request, kgem->ring);

	list_move_tail(&bo->request, &kgem->next_request->buffers);

	/* XXX is it worth working around gcc here? */
	kgem->flush |= bo->flush;
}

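/* A batch is terminated by MI_BATCH_BUFFER_END and, when necessary,
 * padded with MI_NOOP so that its length is an even number of dwords,
 * i.e. quadword aligned as the command streamer expects. */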
static uint32_t kgem_end_batch(struct kgem *kgem)
{
	kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
	if (kgem->nbatch & 1)
		kgem->batch[kgem->nbatch++] = MI_NOOP;

	return kgem->nbatch;
}

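/* Relocations that point back into the batch itself cannot be resolved
 * until the batch bo is known, so they are recorded with a sentinel
 * target_handle of ~0U and patched here at submission. Only the first
 * 256 are indexed in reloc__self; once that overflows, the remainder
 * are found by rescanning the tail of the reloc array for sentinels. */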
static void kgem_fixup_self_relocs(struct kgem *kgem, struct kgem_bo *bo)
{
	int n;

	if (kgem->nreloc__self == 0)
		return;

	for (n = 0; n < kgem->nreloc__self; n++) {
		int i = kgem->reloc__self[n];
		assert(kgem->reloc[i].target_handle == ~0U);
		kgem->reloc[i].target_handle = bo->target_handle;
		kgem->reloc[i].presumed_offset = bo->presumed_offset;
		kgem->batch[kgem->reloc[i].offset/sizeof(kgem->batch[0])] =
			kgem->reloc[i].delta + bo->presumed_offset;
	}

	if (n == 256) {
		for (n = kgem->reloc__self[255]; n < kgem->nreloc; n++) {
			if (kgem->reloc[n].target_handle == ~0U) {
				kgem->reloc[n].target_handle = bo->target_handle;
				kgem->reloc[n].presumed_offset = bo->presumed_offset;
				kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
					kgem->reloc[n].delta + bo->presumed_offset;
			}
		}
	}
}

static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo)
{
	struct kgem_bo_binding *b;

	b = bo->binding.next;
	while (b) {
		struct kgem_bo_binding *next = b->next;
		free(b);
		b = next;
	}
}

static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
{
	int type = IS_CPU_MAP(bo->map);

	assert(!IS_USER_MAP(bo->map));

	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
	     __FUNCTION__, type ? "CPU" : "GTT",
	     bo->handle, kgem->vma[type].count));

	VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
//	munmap(MAP(bo->map), bytes(bo));
	bo->map = NULL;

	if (!list_is_empty(&bo->vma)) {
		list_del(&bo->vma);
		kgem->vma[type].count--;
	}
}

static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
	assert(bo->refcnt == 0);
	assert(bo->exec == NULL);
	assert(!bo->snoop || bo->rq == NULL);

#ifdef DEBUG_MEMORY
	kgem->debug_memory.bo_allocs--;
	kgem->debug_memory.bo_bytes -= bytes(bo);
#endif

	kgem_bo_binding_free(kgem, bo);

	if (IS_USER_MAP(bo->map)) {
		assert(bo->rq == NULL);
		assert(MAP(bo->map) != bo || bo->io);
		if (bo != MAP(bo->map)) {
			DBG(("%s: freeing snooped base\n", __FUNCTION__));
			free(MAP(bo->map));
		}
		bo->map = NULL;
	}
	if (bo->map)
		kgem_bo_release_map(kgem, bo);
	assert(list_is_empty(&bo->vma));

	_list_del(&bo->list);
	_list_del(&bo->request);
	gem_close(kgem->fd, bo->handle);

	if (!bo->io) {
		*(struct kgem_bo **)bo = __kgem_freed_bo;
		__kgem_freed_bo = bo;
	} else
		free(bo);
}

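/* Idle, reusable bos are parked on inactive lists bucketed by size so
 * that later allocations can be serviced from the cache; any still
 * mapped bo is also kept on the per-type (CPU/GTT) vma cache unless
 * its mapping is no longer usable, in which case the map is dropped. */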
inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
					    struct kgem_bo *bo)
{
	DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle));

	assert(bo->refcnt == 0);
	assert(bo->reusable);
	assert(bo->rq == NULL);
	assert(bo->exec == NULL);
	assert(bo->domain != DOMAIN_GPU);
	assert(!bo->proxy);
	assert(!bo->io);
	assert(!bo->scanout);
	assert(!bo->needs_flush);
	assert(list_is_empty(&bo->vma));
	ASSERT_IDLE(kgem, bo->handle);

	kgem->need_expire = true;

	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
		list_move(&bo->list, &kgem->large_inactive);
		return;
	}

	assert(bo->flush == false);
	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
	if (bo->map) {
		int type = IS_CPU_MAP(bo->map);
		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
		    (!type && !__kgem_bo_is_mappable(kgem, bo))) {
//			munmap(MAP(bo->map), bytes(bo));
			bo->map = NULL;
		}
		if (bo->map) {
			list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
			kgem->vma[type].count++;
		}
	}
}

static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo)
{
	struct kgem_bo *base;

	if (!bo->io)
		return bo;

	assert(!bo->snoop);
	base = malloc(sizeof(*base));
	if (base) {
		DBG(("%s: transferring io handle=%d to bo\n",
		     __FUNCTION__, bo->handle));
		/* transfer the handle to a minimum bo */
		memcpy(base, bo, sizeof(*base));
		base->io = false;
		list_init(&base->list);
		list_replace(&bo->request, &base->request);
		list_replace(&bo->vma, &base->vma);
		free(bo);
		bo = base;
	} else
		bo->reusable = false;

	return bo;
}

inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
						struct kgem_bo *bo)
{
	DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));

	list_del(&bo->list);
	assert(bo->rq == NULL);
	assert(bo->exec == NULL);
	if (bo->map) {
		assert(!list_is_empty(&bo->vma));
		list_del(&bo->vma);
		kgem->vma[IS_CPU_MAP(bo->map)].count--;
	}
}

inline static void kgem_bo_remove_from_active(struct kgem *kgem,
					      struct kgem_bo *bo)
{
	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));

	list_del(&bo->list);
	assert(bo->rq != NULL);
	if (bo->rq == (void *)kgem)
		list_del(&bo->request);
	assert(list_is_empty(&bo->vma));
}

static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->scanout);
	assert(!bo->refcnt);
	assert(bo->exec == NULL);
	assert(bo->proxy == NULL);

	DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
	     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
	if (bo->delta) {
		/* XXX will leak if we are not DRM_MASTER. *shrug* */
//		drmModeRmFB(kgem->fd, bo->delta);
		bo->delta = 0;
	}

	bo->scanout = false;
	bo->flush = false;
	bo->reusable = true;

	if (kgem->has_llc &&
	    !gem_set_cacheing(kgem->fd, bo->handle, SNOOPED))
		bo->reusable = false;
}

static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
{
	struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;

	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));

	if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used)
		io->used = bo->delta;
}

static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt == 0);
	assert(bo->scanout);
	assert(bo->delta);
	assert(!bo->snoop);
	assert(!bo->io);

	DBG(("%s: moving %d [fb %d] to scanout cache, active? %d\n",
	     __FUNCTION__, bo->handle, bo->delta, bo->rq != NULL));
	if (bo->rq)
		list_move_tail(&bo->list, &kgem->scanout);
	else
		list_move(&bo->list, &kgem->scanout);
}

static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt == 0);
	assert(bo->exec == NULL);

	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
		     __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13));
		kgem_bo_free(kgem, bo);
		return;
	}

	assert(bo->tiling == I915_TILING_NONE);
	assert(bo->rq == NULL);

	DBG(("%s: moving %d to snoop cache\n", __FUNCTION__, bo->handle));
	list_add(&bo->list, &kgem->snoop);
}

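/* The snoop cache is searched for the smallest bo that fits: an exact
 * or modest fit (up to 2x the request) is taken immediately, while the
 * first grossly oversized candidate is remembered as a fallback rather
 * than discarded. */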
static struct kgem_bo *
search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
{
	struct kgem_bo *bo, *first = NULL;

	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));

	if ((kgem->has_cacheing | kgem->has_userptr) == 0)
		return NULL;

	if (list_is_empty(&kgem->snoop)) {
		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
		if (!__kgem_throttle_retire(kgem, flags)) {
			DBG(("%s: nothing retired\n", __FUNCTION__));
			return NULL;
		}
	}

	list_for_each_entry(bo, &kgem->snoop, list) {
		assert(bo->refcnt == 0);
		assert(bo->snoop);
		assert(!bo->scanout);
		assert(bo->proxy == NULL);
		assert(bo->tiling == I915_TILING_NONE);
		assert(bo->rq == NULL);
		assert(bo->exec == NULL);

		if (num_pages > num_pages(bo))
			continue;

		if (num_pages(bo) > 2*num_pages) {
			if (first == NULL)
				first = bo;
			continue;
		}

		list_del(&bo->list);
		bo->pitch = 0;
		bo->delta = 0;

		DBG((" %s: found handle=%d (num_pages=%d) in snoop cache\n",
		     __FUNCTION__, bo->handle, num_pages(bo)));
		return bo;
	}

	if (first) {
		list_del(&first->list);
		first->pitch = 0;
		first->delta = 0;

		DBG((" %s: found handle=%d (num_pages=%d) in snoop cache\n",
		     __FUNCTION__, first->handle, num_pages(first)));
		return first;
	}

	return NULL;
}

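/* Dropping the last reference does not necessarily free the bo: it may
 * instead be recycled onto the snoop, scanout, active (still busy on
 * the GPU) or inactive caches; only unreusable or unpurgeable bos are
 * actually closed. */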
static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));

	assert(list_is_empty(&bo->list));
	assert(bo->refcnt == 0);
	assert(!bo->purged);
	assert(bo->proxy == NULL);

	bo->binding.offset = 0;

	if (DBG_NO_CACHE)
		goto destroy;

	if (bo->snoop && !bo->flush) {
		DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
		assert(!bo->flush);
		assert(list_is_empty(&bo->list));
		if (bo->exec == NULL && bo->rq && !__kgem_busy(kgem, bo->handle))
			__kgem_bo_clear_busy(bo);
		if (bo->rq == NULL) {
			assert(!bo->needs_flush);
			kgem_bo_move_to_snoop(kgem, bo);
		}
		return;
	}

	if (bo->scanout) {
		kgem_bo_move_to_scanout(kgem, bo);
		return;
	}

	if (bo->io)
		bo = kgem_bo_replace_io(bo);
	if (!bo->reusable) {
		DBG(("%s: handle=%d, not reusable\n",
		     __FUNCTION__, bo->handle));
		goto destroy;
	}

	if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU)
		kgem_bo_release_map(kgem, bo);

	assert(list_is_empty(&bo->vma));
	assert(list_is_empty(&bo->list));
	assert(bo->snoop == false);
	assert(bo->io == false);
	assert(bo->scanout == false);

	if (bo->exec && kgem->nexec == 1) {
		DBG(("%s: only handle in batch, discarding last operations\n",
		     __FUNCTION__));
		assert(bo->exec == &kgem->exec[0]);
		assert(kgem->exec[0].handle == bo->handle);
		assert(RQ(bo->rq) == kgem->next_request);
		bo->refcnt = 1;
		kgem_reset(kgem);
		bo->refcnt = 0;
	}

	if (bo->rq && bo->exec == NULL && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	if (bo->rq) {
		struct list *cache;

		DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
		if (bucket(bo) < NUM_CACHE_BUCKETS)
			cache = &kgem->active[bucket(bo)][bo->tiling];
		else
			cache = &kgem->large;
		list_add(&bo->list, cache);
		return;
	}

	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->request));

	if (!IS_CPU_MAP(bo->map)) {
		if (!kgem_bo_set_purgeable(kgem, bo))
			goto destroy;

		if (!kgem->has_llc && bo->domain == DOMAIN_CPU)
			goto destroy;

		DBG(("%s: handle=%d, purged\n",
		     __FUNCTION__, bo->handle));
	}

	kgem_bo_move_to_inactive(kgem, bo);
	return;

destroy:
	if (!bo->exec)
		kgem_bo_free(kgem, bo);
}

static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		__kgem_bo_destroy(kgem, bo);
}

static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
{
	while (!list_is_empty(&bo->base.vma)) {
		struct kgem_bo *cached;

		cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
		assert(cached->proxy == &bo->base);
		list_del(&cached->vma);

		assert(*(struct kgem_bo **)cached->map == cached);
		*(struct kgem_bo **)cached->map = NULL;
		cached->map = NULL;

		kgem_bo_destroy(kgem, cached);
	}
}

static bool kgem_retire__buffers(struct kgem *kgem)
{
	bool retired = false;

	while (!list_is_empty(&kgem->active_buffers)) {
		struct kgem_buffer *bo =
			list_last_entry(&kgem->active_buffers,
					struct kgem_buffer,
					base.list);

		if (bo->base.rq)
			break;

		DBG(("%s: releasing upload cache for handle=%d? %d\n",
		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
		list_del(&bo->base.list);
		kgem_buffer_release(kgem, bo);
		kgem_bo_unref(kgem, &bo->base);
		retired = true;
	}

	return retired;
}

static bool kgem_retire__flushing(struct kgem *kgem)
{
	struct kgem_bo *bo, *next;
	bool retired = false;

	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
		assert(bo->rq == (void *)kgem);
		assert(bo->exec == NULL);

		if (__kgem_busy(kgem, bo->handle))
			break;

		__kgem_bo_clear_busy(bo);

		if (bo->refcnt)
			continue;

		if (bo->snoop) {
			kgem_bo_move_to_snoop(kgem, bo);
		} else if (bo->scanout) {
			kgem_bo_move_to_scanout(kgem, bo);
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
			   kgem_bo_set_purgeable(kgem, bo)) {
			kgem_bo_move_to_inactive(kgem, bo);
			retired = true;
		} else
			kgem_bo_free(kgem, bo);
	}
#if HAS_DEBUG_FULL
	{
		int count = 0;
		list_for_each_entry(bo, &kgem->flushing, request)
			count++;
		printf("%s: %d bo on flushing list\n", __FUNCTION__, count);
	}
#endif

	kgem->need_retire |= !list_is_empty(&kgem->flushing);

	return retired;
}

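/* Retiring a request walks its buffer list: bos the kernel still
 * reports busy (pending flushes) migrate to the flushing list, while
 * idle ones are recycled through the usual snoop/scanout/inactive
 * paths; finally the request's own batch bo is released. */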
static bool __kgem_retire_rq(struct kgem *kgem, struct kgem_request *rq)
{
	bool retired = false;

	DBG(("%s: request %d complete\n",
	     __FUNCTION__, rq->bo->handle));

	while (!list_is_empty(&rq->buffers)) {
		struct kgem_bo *bo;

		bo = list_first_entry(&rq->buffers,
				      struct kgem_bo,
				      request);

		assert(RQ(bo->rq) == rq);
		assert(bo->exec == NULL);
		assert(bo->domain == DOMAIN_GPU || bo->domain == DOMAIN_NONE);

		list_del(&bo->request);

		if (bo->needs_flush)
			bo->needs_flush = __kgem_busy(kgem, bo->handle);
		if (bo->needs_flush) {
			DBG(("%s: moving %d to flushing\n",
			     __FUNCTION__, bo->handle));
			list_add(&bo->request, &kgem->flushing);
			bo->rq = (void *)kgem;
			continue;
		}

		bo->domain = DOMAIN_NONE;
		bo->rq = NULL;
		if (bo->refcnt)
			continue;

		if (bo->snoop) {
			kgem_bo_move_to_snoop(kgem, bo);
		} else if (bo->scanout) {
			kgem_bo_move_to_scanout(kgem, bo);
		} else if ((bo = kgem_bo_replace_io(bo))->reusable &&
			   kgem_bo_set_purgeable(kgem, bo)) {
			kgem_bo_move_to_inactive(kgem, bo);
			retired = true;
		} else {
			DBG(("%s: closing %d\n",
			     __FUNCTION__, bo->handle));
			kgem_bo_free(kgem, bo);
		}
	}

	assert(rq->bo->rq == NULL);
	assert(list_is_empty(&rq->bo->request));

	if (--rq->bo->refcnt == 0) {
		if (kgem_bo_set_purgeable(kgem, rq->bo)) {
			kgem_bo_move_to_inactive(kgem, rq->bo);
			retired = true;
		} else {
			DBG(("%s: closing %d\n",
			     __FUNCTION__, rq->bo->handle));
			kgem_bo_free(kgem, rq->bo);
		}
	}

	__kgem_request_free(rq);
	return retired;
}

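/* Retire, in submission order, every request on the given ring that the
 * kernel no longer reports as busy; stop at the first request that is
 * still executing.
 */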
static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
{
	bool retired = false;

	while (!list_is_empty(&kgem->requests[ring])) {
		struct kgem_request *rq;

		rq = list_first_entry(&kgem->requests[ring],
				      struct kgem_request,
				      list);
		if (__kgem_busy(kgem, rq->bo->handle))
			break;

		retired |= __kgem_retire_rq(kgem, rq);
	}

#if HAS_DEBUG_FULL
	{
		struct kgem_bo *bo;
		int count = 0;

		list_for_each_entry(bo, &kgem->requests[ring], request)
			count++;

		bo = NULL;
		if (!list_is_empty(&kgem->requests[ring]))
			bo = list_first_entry(&kgem->requests[ring],
					      struct kgem_request,
					      list)->bo;

		printf("%s: ring=%d, %d outstanding requests, oldest=%d\n",
		       __FUNCTION__, ring, count, bo ? bo->handle : 0);
	}
#endif

	return retired;
}

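/* Retire completed requests on every ring. */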
static bool kgem_retire__requests(struct kgem *kgem)
{
	bool retired = false;
	int n;

	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
		retired |= kgem_retire__requests_ring(kgem, n);
		kgem->need_retire |= !list_is_empty(&kgem->requests[n]);
	}

	return retired;
}

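/* Retire across the board: reap the flushing list, every ring's request
 * queue and the active upload buffers, then notify the backend via the
 * retire() hook. Returns true if anything became available for reuse.
 */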
bool kgem_retire(struct kgem *kgem)
{
	bool retired = false;

	DBG(("%s\n", __FUNCTION__));

	kgem->need_retire = false;

	retired |= kgem_retire__flushing(kgem);
	retired |= kgem_retire__requests(kgem);
	retired |= kgem_retire__buffers(kgem);

	DBG(("%s -- retired=%d, need_retire=%d\n",
	     __FUNCTION__, retired, kgem->need_retire));

	kgem->retire(kgem);

	return retired;
}

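/* Check whether the given ring has drained: if the most recently
 * submitted request is no longer busy, everything before it has
 * completed as well, so the whole ring can be retired.
 */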
bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
{
	struct kgem_request *rq;

	assert(!list_is_empty(&kgem->requests[ring]));

	rq = list_last_entry(&kgem->requests[ring],
			     struct kgem_request, list);
	if (__kgem_busy(kgem, rq->bo->handle)) {
		DBG(("%s: last request handle=%d still busy\n",
		     __FUNCTION__, rq->bo->handle));
		return false;
	}

	DBG(("%s: ring=%d idle (handle=%d)\n",
	     __FUNCTION__, ring, rq->bo->handle));

	kgem_retire__requests_ring(kgem, ring);
	assert(list_is_empty(&kgem->requests[ring]));
	return true;
}

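/* After a successful execbuffer, move every buffer of the submitted
 * request into the GPU domain and record its presumed offset. For the
 * fallback static request (used when request allocation failed) we
 * instead wait synchronously for the GPU and then drop the caches.
 */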
static void kgem_commit(struct kgem *kgem)
{
	struct kgem_request *rq = kgem->next_request;
	struct kgem_bo *bo, *next;

	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
		assert(next->request.prev == &bo->request);

		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n",
		     __FUNCTION__, bo->handle, bo->proxy != NULL,
		     bo->dirty, bo->needs_flush, bo->snoop,
		     (unsigned)bo->exec->offset));

		assert(!bo->purged);
		assert(bo->exec);
		assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec);
		assert(RQ(bo->rq) == rq || (RQ(bo->proxy->rq) == rq));

		bo->presumed_offset = bo->exec->offset;
		bo->exec = NULL;
		bo->target_handle = -1;

		if (!bo->refcnt && !bo->reusable) {
			assert(!bo->snoop);
			kgem_bo_free(kgem, bo);
			continue;
		}

		bo->binding.offset = 0;
		bo->domain = DOMAIN_GPU;
		bo->dirty = false;

		if (bo->proxy) {
			/* proxies are not used for domain tracking */
			bo->exec = NULL;
			__kgem_bo_clear_busy(bo);
		}

		kgem->scanout_busy |= bo->scanout;
	}

	if (rq == &kgem->static_request) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: syncing due to allocation failure\n", __FUNCTION__));

		VG_CLEAR(set_domain);
		set_domain.handle = rq->bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
			kgem_throttle(kgem);
		}

		kgem_retire(kgem);
		assert(list_is_empty(&rq->buffers));

		gem_close(kgem->fd, rq->bo->handle);
		kgem_cleanup_cache(kgem);
	} else {
		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
		kgem->need_throttle = kgem->need_retire = 1;
	}

	kgem->next_request = NULL;
}

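/* Free every bo on the given list, closing the underlying GEM handles. */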
static void kgem_close_list(struct kgem *kgem, struct list *head)
{
	while (!list_is_empty(head))
		kgem_bo_free(kgem, list_first_entry(head, struct kgem_bo, list));
}

static void kgem_close_inactive(struct kgem *kgem)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
		kgem_close_list(kgem, &kgem->inactive[i]);
}

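/* Before submission, finalise the partial upload buffers attached to the
 * current batch: retain mmapped buffers with room to spare for reuse,
 * shrink barely-used buffers into a smaller bo (patching the relocations
 * to point at the replacement), and pwrite any remaining data.
 */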
static void kgem_finish_buffers(struct kgem *kgem)
{
	struct kgem_buffer *bo, *next;

	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
		     bo->write, bo->mmapped));

		assert(next->base.list.prev == &bo->base.list);
		assert(bo->base.io);
		assert(bo->base.refcnt >= 1);

		if (!bo->base.exec) {
			DBG(("%s: skipping unattached handle=%d, used=%d\n",
			     __FUNCTION__, bo->base.handle, bo->used));
			continue;
		}

		if (!bo->write) {
			assert(bo->base.exec || bo->base.refcnt > 1);
			goto decouple;
		}

		if (bo->mmapped) {
			int used;

			assert(!bo->need_io);

			used = ALIGN(bo->used, PAGE_SIZE);
			if (!DBG_NO_UPLOAD_ACTIVE &&
			    used + PAGE_SIZE <= bytes(&bo->base) &&
			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) {
				DBG(("%s: retaining upload buffer (%d/%d)\n",
				     __FUNCTION__, bo->used, bytes(&bo->base)));
				bo->used = used;
				list_move(&bo->base.list,
					  &kgem->active_buffers);
				continue;
			}
			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
			     __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map)));
			goto decouple;
		}

		if (!bo->used) {
			/* Unless we replace the handle in the execbuffer,
			 * then this bo will become active. So decouple it
			 * from the buffer list and track it in the normal
			 * manner.
			 */
			goto decouple;
		}

		assert(bo->need_io);
		assert(bo->base.rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
		assert(bo->base.domain != DOMAIN_GPU);

		if (bo->base.refcnt == 1 &&
		    bo->base.size.pages.count > 1 &&
		    bo->used < bytes(&bo->base) / 2) {
			struct kgem_bo *shrink;
			unsigned alloc = NUM_PAGES(bo->used);

			shrink = search_snoop_cache(kgem, alloc,
						    CREATE_INACTIVE | CREATE_NO_RETIRE);
			if (shrink) {
				void *map;
				int n;

				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
				     __FUNCTION__,
				     bo->used, bytes(&bo->base), bytes(shrink),
				     bo->base.handle, shrink->handle));

				assert(bo->used <= bytes(shrink));
				map = kgem_bo_map__cpu(kgem, shrink);
				if (map) {
					kgem_bo_sync__cpu(kgem, shrink);
					memcpy(map, bo->mem, bo->used);

					shrink->target_handle =
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
					for (n = 0; n < kgem->nreloc; n++) {
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
							kgem->reloc[n].target_handle = shrink->target_handle;
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
								kgem->reloc[n].delta + shrink->presumed_offset;
						}
					}

					bo->base.exec->handle = shrink->handle;
					bo->base.exec->offset = shrink->presumed_offset;
					shrink->exec = bo->base.exec;
					shrink->rq = bo->base.rq;
					list_replace(&bo->base.request,
						     &shrink->request);
					list_init(&bo->base.request);
					shrink->needs_flush = bo->base.dirty;

					bo->base.exec = NULL;
					bo->base.rq = NULL;
					bo->base.dirty = false;
					bo->base.needs_flush = false;
					bo->used = 0;

					goto decouple;
				}

				__kgem_bo_destroy(kgem, shrink);
			}

			shrink = search_linear_cache(kgem, alloc,
						     CREATE_INACTIVE | CREATE_NO_RETIRE);
			if (shrink) {
				int n;

				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
				     __FUNCTION__,
				     bo->used, bytes(&bo->base), bytes(shrink),
				     bo->base.handle, shrink->handle));

				assert(bo->used <= bytes(shrink));
				if (gem_write(kgem->fd, shrink->handle,
					      0, bo->used, bo->mem) == 0) {
					shrink->target_handle =
						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
					for (n = 0; n < kgem->nreloc; n++) {
						if (kgem->reloc[n].target_handle == bo->base.target_handle) {
							kgem->reloc[n].target_handle = shrink->target_handle;
							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
								kgem->reloc[n].delta + shrink->presumed_offset;
						}
					}

					bo->base.exec->handle = shrink->handle;
					bo->base.exec->offset = shrink->presumed_offset;
					shrink->exec = bo->base.exec;
					shrink->rq = bo->base.rq;
					list_replace(&bo->base.request,
						     &shrink->request);
					list_init(&bo->base.request);
					shrink->needs_flush = bo->base.dirty;

					bo->base.exec = NULL;
					bo->base.rq = NULL;
					bo->base.dirty = false;
					bo->base.needs_flush = false;
					bo->used = 0;

					goto decouple;
				}

				__kgem_bo_destroy(kgem, shrink);
			}
		}

		DBG(("%s: handle=%d, uploading %d/%d\n",
		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
		ASSERT_IDLE(kgem, bo->base.handle);
		assert(bo->used <= bytes(&bo->base));
		gem_write(kgem->fd, bo->base.handle,
			  0, bo->used, bo->mem);
		bo->need_io = 0;

decouple:
		DBG(("%s: releasing handle=%d\n",
		     __FUNCTION__, bo->base.handle));
		list_del(&bo->base.list);
		kgem_bo_unref(kgem, &bo->base);
	}
}

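/* Forcibly drop all outstanding requests and their buffers, e.g. after a
 * GPU hang, then close every inactive bo.
 */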
static void kgem_cleanup(struct kgem *kgem)
{
	int n;

	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
		while (!list_is_empty(&kgem->requests[n])) {
			struct kgem_request *rq;

			rq = list_first_entry(&kgem->requests[n],
					      struct kgem_request,
					      list);
			while (!list_is_empty(&rq->buffers)) {
				struct kgem_bo *bo;

				bo = list_first_entry(&rq->buffers,
						      struct kgem_bo,
						      request);

				bo->exec = NULL;
				bo->dirty = false;
				__kgem_bo_clear_busy(bo);
				if (bo->refcnt == 0)
					kgem_bo_free(kgem, bo);
			}

			__kgem_request_free(rq);
		}
	}

	kgem_close_inactive(kgem);
}

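/* Upload the batch commands (and any surface state placed at the top of
 * the batch array) into the freshly allocated batch bo: one pwrite when
 * the two regions share a page, two pwrites when they are disjoint.
 */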
static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
{
	int ret;

	ASSERT_IDLE(kgem, handle);

	/* If there is no surface data, just upload the batch */
	if (kgem->surface == kgem->batch_size)
		return gem_write(kgem->fd, handle,
				 0, sizeof(uint32_t)*kgem->nbatch,
				 kgem->batch);

	/* Are the batch pages conjoint with the surface pages? */
	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
		return gem_write(kgem->fd, handle,
				 0, kgem->batch_size*sizeof(uint32_t),
				 kgem->batch);
	}

	/* Disjoint surface/batch, upload separately */
	ret = gem_write(kgem->fd, handle,
			0, sizeof(uint32_t)*kgem->nbatch,
			kgem->batch);
	if (ret)
		return ret;

	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
	ret -= sizeof(uint32_t) * kgem->surface;
	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
	return __gem_write(kgem->fd, handle,
			   size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
			   kgem->batch + kgem->surface);
}

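/* Reset the batch state after submission (or to abandon a batch): undo
 * the execbuffer bookkeeping on any buffers still attached to the old
 * request, then start a fresh request with empty exec/reloc arrays.
 */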
void kgem_reset(struct kgem *kgem)
{
	if (kgem->next_request) {
		struct kgem_request *rq = kgem->next_request;

		while (!list_is_empty(&rq->buffers)) {
			struct kgem_bo *bo =
				list_first_entry(&rq->buffers,
						 struct kgem_bo,
						 request);
			list_del(&bo->request);

			assert(RQ(bo->rq) == rq);

			bo->binding.offset = 0;
			bo->exec = NULL;
			bo->target_handle = -1;
			bo->dirty = false;

			if (bo->needs_flush && __kgem_busy(kgem, bo->handle)) {
				list_add(&bo->request, &kgem->flushing);
				bo->rq = (void *)kgem;
			} else
				__kgem_bo_clear_busy(bo);

			if (!bo->refcnt && !bo->reusable) {
				assert(!bo->snoop);
				DBG(("%s: discarding handle=%d\n",
				     __FUNCTION__, bo->handle));
				kgem_bo_free(kgem, bo);
			}
		}

		if (rq != &kgem->static_request) {
			list_init(&rq->list);
			__kgem_request_free(rq);
		}
	}

	kgem->nfence = 0;
	kgem->nexec = 0;
	kgem->nreloc = 0;
	kgem->nreloc__self = 0;
	kgem->aperture = 0;
	kgem->aperture_fenced = 0;
	kgem->nbatch = 0;
	kgem->surface = kgem->batch_size;
	kgem->mode = KGEM_NONE;
	kgem->flush = 0;
	kgem->batch_flags = kgem->batch_flags_base;

	kgem->next_request = __kgem_request_alloc(kgem);

	kgem_sna_reset(kgem);
}

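/* If the surface state sits at the far end of a mostly-empty batch,
 * repack it against the commands so the upload fits in fewer pages,
 * adjusting the affected relocations. Returns the upload size in bytes.
 */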
static int compact_batch_surface(struct kgem *kgem)
{
	int size, shrink, n;

	if (!kgem->has_relaxed_delta)
		return kgem->batch_size * sizeof(uint32_t);

	/* See if we can pack the contents into one or two pages */
	n = ALIGN(kgem->batch_size, 1024);
	size = n - kgem->surface + kgem->nbatch;
	size = ALIGN(size, 1024);

	shrink = n - size;
	if (shrink) {
		DBG(("shrinking from %d to %d\n", kgem->batch_size, size));

		shrink *= sizeof(uint32_t);
		for (n = 0; n < kgem->nreloc; n++) {
			if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
			    kgem->reloc[n].target_handle == ~0U)
				kgem->reloc[n].delta -= shrink;

			if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
				kgem->reloc[n].offset -= shrink;
		}
	}

	return size * sizeof(uint32_t);
}

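/* Pick a bo to hold the batch: reuse one of the small pinned batch bos if
 * idle (retiring its request if the kernel has finished with it), stall
 * for one on gen2 without pinned-batch support, and otherwise allocate a
 * fresh linear bo.
 */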
static struct kgem_bo *
kgem_create_batch(struct kgem *kgem, int size)
{
	struct drm_i915_gem_set_domain set_domain;
	struct kgem_bo *bo;

	if (size <= 4096) {
		bo = list_first_entry(&kgem->pinned_batches[0],
				      struct kgem_bo,
				      list);
		if (!bo->rq) {
out_4096:
			list_move_tail(&bo->list, &kgem->pinned_batches[0]);
			return kgem_bo_reference(bo);
		}

		if (!__kgem_busy(kgem, bo->handle)) {
			assert(RQ(bo->rq)->bo == bo);
			__kgem_retire_rq(kgem, RQ(bo->rq));
			goto out_4096;
		}
	}

	if (size <= 16384) {
		bo = list_first_entry(&kgem->pinned_batches[1],
				      struct kgem_bo,
				      list);
		if (!bo->rq) {
out_16384:
			list_move_tail(&bo->list, &kgem->pinned_batches[1]);
			return kgem_bo_reference(bo);
		}

		if (!__kgem_busy(kgem, bo->handle)) {
			assert(RQ(bo->rq)->bo == bo);
			__kgem_retire_rq(kgem, RQ(bo->rq));
			goto out_16384;
		}
	}

	if (kgem->gen == 020 && !kgem->has_pinned_batches) {
		assert(size <= 16384);

		bo = list_first_entry(&kgem->pinned_batches[size > 4096],
				      struct kgem_bo,
				      list);
		list_move_tail(&bo->list, &kgem->pinned_batches[size > 4096]);

		DBG(("%s: syncing due to busy batches\n", __FUNCTION__));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
			kgem_throttle(kgem);
			return NULL;
		}

		kgem_retire(kgem);
		assert(bo->rq == NULL);
		return kgem_bo_reference(bo);
	}

	return kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
}

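/* Submit the accumulated batch: finalise the upload buffers, copy the
 * commands into a batch bo and execute it via
 * DRM_IOCTL_I915_GEM_EXECBUFFER2, retrying on EBUSY after throttling.
 * On failure the GPU is assumed hung and kgem is marked wedged.
 */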
void _kgem_submit(struct kgem *kgem)
{
	struct kgem_request *rq;
	uint32_t batch_end;
	int size;

	assert(!DBG_NO_HW);
	assert(!kgem->wedged);

	assert(kgem->nbatch);
	assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
	assert(kgem->nbatch <= kgem->surface);

	batch_end = kgem_end_batch(kgem);
	kgem_sna_flush(kgem);

	DBG(("batch[%d/%d]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
	     kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));

	assert(kgem->nbatch <= kgem->batch_size);
	assert(kgem->nbatch <= kgem->surface);
	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
	assert(kgem->nfence <= kgem->fence_max);

	kgem_finish_buffers(kgem);

#if SHOW_BATCH
	__kgem_batch_debug(kgem, batch_end);
#endif

	rq = kgem->next_request;
	if (kgem->surface != kgem->batch_size)
		size = compact_batch_surface(kgem);
	else
		size = kgem->nbatch * sizeof(kgem->batch[0]);
	rq->bo = kgem_create_batch(kgem, size);
	if (rq->bo) {
		uint32_t handle = rq->bo->handle;
		int i;

		assert(!rq->bo->needs_flush);

		i = kgem->nexec++;
		kgem->exec[i].handle = handle;
		kgem->exec[i].relocation_count = kgem->nreloc;
		kgem->exec[i].relocs_ptr = (uintptr_t)kgem->reloc;
		kgem->exec[i].alignment = 0;
		kgem->exec[i].offset = rq->bo->presumed_offset;
		kgem->exec[i].flags = 0;
		kgem->exec[i].rsvd1 = 0;
		kgem->exec[i].rsvd2 = 0;

		rq->bo->target_handle = kgem->has_handle_lut ? i : handle;
		rq->bo->exec = &kgem->exec[i];
		rq->bo->rq = MAKE_REQUEST(rq, kgem->ring); /* useful sanity check */
		list_add(&rq->bo->request, &rq->buffers);
		rq->ring = kgem->ring == KGEM_BLT;

		kgem_fixup_self_relocs(kgem, rq->bo);

		if (kgem_batch_write(kgem, handle, size) == 0) {
			struct drm_i915_gem_execbuffer2 execbuf;
			int ret, retry = 3;

			VG_CLEAR(execbuf);
			execbuf.buffers_ptr = (uintptr_t)kgem->exec;
			execbuf.buffer_count = kgem->nexec;
			execbuf.batch_start_offset = 0;
			execbuf.batch_len = batch_end*sizeof(uint32_t);
			execbuf.cliprects_ptr = 0;
			execbuf.num_cliprects = 0;
			execbuf.DR1 = 0;
			execbuf.DR4 = 0;
			execbuf.flags = kgem->ring | kgem->batch_flags;
			execbuf.rsvd1 = 0;
			execbuf.rsvd2 = 0;

			ret = drmIoctl(kgem->fd,
				       DRM_IOCTL_I915_GEM_EXECBUFFER2,
				       &execbuf);
			while (ret == -1 && errno == EBUSY && retry--) {
				__kgem_throttle(kgem);
				ret = drmIoctl(kgem->fd,
					       DRM_IOCTL_I915_GEM_EXECBUFFER2,
					       &execbuf);
			}
			if (DEBUG_SYNC && ret == 0) {
				struct drm_i915_gem_set_domain set_domain;

				VG_CLEAR(set_domain);
				set_domain.handle = handle;
				set_domain.read_domains = I915_GEM_DOMAIN_GTT;
				set_domain.write_domain = I915_GEM_DOMAIN_GTT;

				ret = drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
			}
			if (ret == -1) {
//				DBG(("%s: GPU hang detected [%d]\n",
//				     __FUNCTION__, errno));
				kgem_throttle(kgem);
				kgem->wedged = true;

#if 0
				ret = errno;
				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);

				for (i = 0; i < kgem->nexec; i++) {
					struct kgem_bo *bo, *found = NULL;

					list_for_each_entry(bo, &kgem->next_request->buffers, request) {
						if (bo->handle == kgem->exec[i].handle) {
							found = bo;
							break;
						}
					}
					ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
					       i,
					       kgem->exec[i].handle,
					       (int)kgem->exec[i].offset,
					       found ? kgem_bo_size(found) : -1,
					       found ? found->tiling : -1,
					       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
					       found ? found->snoop : -1,
					       found ? found->purged : -1);
				}
				for (i = 0; i < kgem->nreloc; i++) {
					ErrorF("reloc[%d] = pos:%d, target:%d, delta:%d, read:%x, write:%x, offset:%x\n",
					       i,
					       (int)kgem->reloc[i].offset,
					       kgem->reloc[i].target_handle,
					       kgem->reloc[i].delta,
					       kgem->reloc[i].read_domains,
					       kgem->reloc[i].write_domain,
					       (int)kgem->reloc[i].presumed_offset);
				}

				if (DEBUG_SYNC) {
					int fd = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
					if (fd != -1) {
						write(fd, kgem->batch, batch_end*sizeof(uint32_t));
						close(fd);
					}

					FatalError("SNA: failed to submit batchbuffer, errno=%d\n", ret);
				}
#endif
			}
		}

		kgem_commit(kgem);
	}
	if (kgem->wedged)
		kgem_cleanup(kgem);

	kgem_reset(kgem);

	assert(kgem->next_request != NULL);
}

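/* Ask the kernel to throttle us; if that fails the GPU has hung, so
 * disable further hardware acceleration.
 */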
void kgem_throttle(struct kgem *kgem)
{
	kgem->need_throttle = 0;
	if (kgem->wedged)
		return;

	kgem->wedged = __kgem_throttle(kgem);
	if (kgem->wedged) {
		printf("Detected a hung GPU, disabling acceleration.\n");
		printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
	}
}

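/* Drop every inactive bo whose backing storage the kernel discarded
 * while it was marked purgeable.
 */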
void kgem_purge_cache(struct kgem *kgem)
{
	struct kgem_bo *bo, *next;
	int i;

	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
		list_for_each_entry_safe(bo, next, &kgem->inactive[i], list) {
			if (!kgem_bo_is_retained(kgem, bo)) {
				DBG(("%s: purging %d\n",
				     __FUNCTION__, bo->handle));
				kgem_bo_free(kgem, bo);
			}
		}
	}

	kgem->need_purge = false;
}

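/* Periodic cache maintenance: release deferred frees, idle scanouts and
 * oversized bos, then age the snoop and inactive caches, expiring
 * anything left unused for longer than its inactivity limit. Returns
 * true while there is still work left for a future pass.
 */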
2609 | bool kgem_expire_cache(struct kgem *kgem) |
2609 | bool kgem_expire_cache(struct kgem *kgem) |
2610 | { |
2610 | { |
2611 | time_t now, expire; |
2611 | time_t now, expire; |
2612 | struct kgem_bo *bo; |
2612 | struct kgem_bo *bo; |
2613 | unsigned int size = 0, count = 0; |
2613 | unsigned int size = 0, count = 0; |
2614 | bool idle; |
2614 | bool idle; |
2615 | unsigned int i; |
2615 | unsigned int i; |
2616 | 2616 | ||
2617 | time(&now); |
2617 | time(&now); |
2618 | 2618 | ||
2619 | while (__kgem_freed_bo) { |
2619 | while (__kgem_freed_bo) { |
2620 | bo = __kgem_freed_bo; |
2620 | bo = __kgem_freed_bo; |
2621 | __kgem_freed_bo = *(struct kgem_bo **)bo; |
2621 | __kgem_freed_bo = *(struct kgem_bo **)bo; |
2622 | free(bo); |
2622 | free(bo); |
2623 | } |
2623 | } |
2624 | 2624 | ||
2625 | while (__kgem_freed_request) { |
2625 | while (__kgem_freed_request) { |
2626 | struct kgem_request *rq = __kgem_freed_request; |
2626 | struct kgem_request *rq = __kgem_freed_request; |
2627 | __kgem_freed_request = *(struct kgem_request **)rq; |
2627 | __kgem_freed_request = *(struct kgem_request **)rq; |
2628 | free(rq); |
2628 | free(rq); |
2629 | } |
2629 | } |
2630 | 2630 | ||
2631 | while (!list_is_empty(&kgem->large_inactive)) { |
2631 | while (!list_is_empty(&kgem->large_inactive)) { |
2632 | kgem_bo_free(kgem, |
2632 | kgem_bo_free(kgem, |
2633 | list_first_entry(&kgem->large_inactive, |
2633 | list_first_entry(&kgem->large_inactive, |
2634 | struct kgem_bo, list)); |
2634 | struct kgem_bo, list)); |
2635 | 2635 | ||
2636 | } |
2636 | } |
2637 | 2637 | ||
2638 | while (!list_is_empty(&kgem->scanout)) { |
2638 | while (!list_is_empty(&kgem->scanout)) { |
2639 | bo = list_first_entry(&kgem->scanout, struct kgem_bo, list); |
2639 | bo = list_first_entry(&kgem->scanout, struct kgem_bo, list); |
2640 | if (__kgem_busy(kgem, bo->handle)) |
2640 | if (__kgem_busy(kgem, bo->handle)) |
2641 | break; |
2641 | break; |
2642 | 2642 | ||
2643 | list_del(&bo->list); |
2643 | list_del(&bo->list); |
		kgem_bo_clear_scanout(kgem, bo);
		__kgem_bo_destroy(kgem, bo);
	}

	expire = 0;
	list_for_each_entry(bo, &kgem->snoop, list) {
		if (bo->delta) {
			expire = now - MAX_INACTIVE_TIME/2;
			break;
		}

		bo->delta = now;
	}
	if (expire) {
		while (!list_is_empty(&kgem->snoop)) {
			bo = list_last_entry(&kgem->snoop, struct kgem_bo, list);

			if (bo->delta > expire)
				break;

			kgem_bo_free(kgem, bo);
		}
	}
#ifdef DEBUG_MEMORY
	{
		long snoop_size = 0;
		int snoop_count = 0;
		list_for_each_entry(bo, &kgem->snoop, list)
			snoop_count++, snoop_size += bytes(bo);
		ErrorF("%s: still allocated %d bo, %ld bytes, in snoop cache\n",
		       __FUNCTION__, snoop_count, snoop_size);
	}
#endif

	kgem_retire(kgem);
	if (kgem->wedged)
		kgem_cleanup(kgem);

	kgem->expire(kgem);

	if (kgem->need_purge)
		kgem_purge_cache(kgem);

	expire = 0;

	idle = !kgem->need_retire;
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
		idle &= list_is_empty(&kgem->inactive[i]);
		list_for_each_entry(bo, &kgem->inactive[i], list) {
			if (bo->delta) {
				expire = now - MAX_INACTIVE_TIME;
				break;
			}

			bo->delta = now;
		}
	}
	if (idle) {
		DBG(("%s: idle\n", __FUNCTION__));
		kgem->need_expire = false;
		return false;
	}
	if (expire == 0)
		return true;

	idle = !kgem->need_retire;
	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
		struct list preserve;

		list_init(&preserve);
		while (!list_is_empty(&kgem->inactive[i])) {
			bo = list_last_entry(&kgem->inactive[i],
					     struct kgem_bo, list);

			if (bo->delta > expire) {
				idle = false;
				break;
			}

			if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) {
				idle = false;
				list_move_tail(&bo->list, &preserve);
			} else {
				count++;
				size += bytes(bo);
				kgem_bo_free(kgem, bo);
				DBG(("%s: expiring %d\n",
				     __FUNCTION__, bo->handle));
			}
		}
		if (!list_is_empty(&preserve)) {
			preserve.prev->next = kgem->inactive[i].next;
			kgem->inactive[i].next->prev = preserve.prev;
			kgem->inactive[i].next = preserve.next;
			preserve.next->prev = &kgem->inactive[i];
		}
	}

#ifdef DEBUG_MEMORY
	{
		long inactive_size = 0;
		int inactive_count = 0;
		for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
			list_for_each_entry(bo, &kgem->inactive[i], list)
				inactive_count++, inactive_size += bytes(bo);
		ErrorF("%s: still allocated %d bo, %ld bytes, in inactive cache\n",
		       __FUNCTION__, inactive_count, inactive_size);
	}
#endif

	DBG(("%s: expired %d objects, %d bytes, idle? %d\n",
	     __FUNCTION__, count, size, idle));

	kgem->need_expire = !idle;
	return !idle;
	(void)count;
	(void)size;
}

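/* Drop every cached buffer. The set-domain ioctl below stalls on the
 * most recent request per ring, so that the subsequent retire can
 * reclaim every outstanding buffer before the caches are freed.
 */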
void kgem_cleanup_cache(struct kgem *kgem)
{
	unsigned int i;
	int n;

	/* sync to the most recent request */
	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
		if (!list_is_empty(&kgem->requests[n])) {
			struct kgem_request *rq;
			struct drm_i915_gem_set_domain set_domain;

			rq = list_first_entry(&kgem->requests[n],
					      struct kgem_request,
					      list);

			DBG(("%s: sync on cleanup\n", __FUNCTION__));

			VG_CLEAR(set_domain);
			set_domain.handle = rq->bo->handle;
			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
			(void)drmIoctl(kgem->fd,
				       DRM_IOCTL_I915_GEM_SET_DOMAIN,
				       &set_domain);
		}
	}

	kgem_retire(kgem);
	kgem_cleanup(kgem);

	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
		while (!list_is_empty(&kgem->inactive[i]))
			kgem_bo_free(kgem,
				     list_last_entry(&kgem->inactive[i],
						     struct kgem_bo, list));
	}

	while (!list_is_empty(&kgem->snoop))
		kgem_bo_free(kgem,
			     list_last_entry(&kgem->snoop,
					     struct kgem_bo, list));

	while (__kgem_freed_bo) {
		struct kgem_bo *bo = __kgem_freed_bo;
		__kgem_freed_bo = *(struct kgem_bo **)bo;
		free(bo);
	}

	kgem->need_purge = false;
	kgem->need_expire = false;
}

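/* Search the linear caches for an untiled bo of at least num_pages.
 * CREATE_INACTIVE restricts the search to idle buffers (retiring
 * in-flight work if the bucket is empty, unless CREATE_NO_RETIRE);
 * CREATE_CPU_MAP/CREATE_GTT_MAP first try bos that are already mapped
 * appropriately. A candidate with the wrong mapping is remembered in
 * 'first' and returned as a near-miss if nothing better is found.
 */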
static struct kgem_bo *
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
{
	struct kgem_bo *bo, *first = NULL;
	bool use_active = (flags & CREATE_INACTIVE) == 0;
	struct list *cache;

	DBG(("%s: num_pages=%d, flags=%x, use_active? %d\n",
	     __FUNCTION__, num_pages, flags, use_active));

	if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
		return NULL;

	if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
		DBG(("%s: inactive and cache bucket empty\n",
		     __FUNCTION__));

		if (flags & CREATE_NO_RETIRE) {
			DBG(("%s: can not retire\n", __FUNCTION__));
			return NULL;
		}

		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) {
			DBG(("%s: active cache bucket empty\n", __FUNCTION__));
			return NULL;
		}

		if (!__kgem_throttle_retire(kgem, flags)) {
			DBG(("%s: nothing retired\n", __FUNCTION__));
			return NULL;
		}

		if (list_is_empty(inactive(kgem, num_pages))) {
			DBG(("%s: inactive cache bucket still empty after retire\n",
			     __FUNCTION__));
			return NULL;
		}
	}

	if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
		int for_cpu = !!(flags & CREATE_CPU_MAP);
		DBG(("%s: searching for inactive %s map\n",
		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
		list_for_each_entry(bo, cache, vma) {
			assert(IS_CPU_MAP(bo->map) == for_cpu);
			assert(bucket(bo) == cache_bucket(num_pages));
			assert(bo->proxy == NULL);
			assert(bo->rq == NULL);
			assert(bo->exec == NULL);
			assert(!bo->scanout);

			if (num_pages > num_pages(bo)) {
				DBG(("inactive too small: %d < %d\n",
				     num_pages(bo), num_pages));
				continue;
			}

			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
				kgem_bo_free(kgem, bo);
				break;
			}

			if (I915_TILING_NONE != bo->tiling &&
			    !gem_set_tiling(kgem->fd, bo->handle,
					    I915_TILING_NONE, 0))
				continue;

			kgem_bo_remove_from_inactive(kgem, bo);

			bo->tiling = I915_TILING_NONE;
			bo->pitch = 0;
			bo->delta = 0;
			DBG((" %s: found handle=%d (num_pages=%d) in linear vma cache\n",
			     __FUNCTION__, bo->handle, num_pages(bo)));
			assert(use_active || bo->domain != DOMAIN_GPU);
			assert(!bo->needs_flush);
			ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
			return bo;
		}

		if (flags & CREATE_EXACT)
			return NULL;

		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
			return NULL;
	}

	cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages);
	list_for_each_entry(bo, cache, list) {
		assert(bo->refcnt == 0);
		assert(bo->reusable);
		assert(!!bo->rq == !!use_active);
		assert(bo->proxy == NULL);
		assert(!bo->scanout);

		if (num_pages > num_pages(bo))
			continue;

		if (use_active &&
		    kgem->gen <= 040 &&
		    bo->tiling != I915_TILING_NONE)
			continue;

		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
			kgem_bo_free(kgem, bo);
			break;
		}

		if (I915_TILING_NONE != bo->tiling) {
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP))
				continue;

			if (first)
				continue;

			if (!gem_set_tiling(kgem->fd, bo->handle,
					    I915_TILING_NONE, 0))
				continue;

			bo->tiling = I915_TILING_NONE;
			bo->pitch = 0;
		}

		if (bo->map) {
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
				int for_cpu = !!(flags & CREATE_CPU_MAP);
				if (IS_CPU_MAP(bo->map) != for_cpu) {
					if (first != NULL)
						break;

					first = bo;
					continue;
				}
			} else {
				if (first != NULL)
					break;

				first = bo;
				continue;
			}
		} else {
			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
				if (first != NULL)
					break;

				first = bo;
				continue;
			}
		}

		if (use_active)
			kgem_bo_remove_from_active(kgem, bo);
		else
			kgem_bo_remove_from_inactive(kgem, bo);

		assert(bo->tiling == I915_TILING_NONE);
		bo->pitch = 0;
		bo->delta = 0;
		DBG((" %s: found handle=%d (num_pages=%d) in linear %s cache\n",
		     __FUNCTION__, bo->handle, num_pages(bo),
		     use_active ? "active" : "inactive"));
		assert(list_is_empty(&bo->list));
		assert(use_active || bo->domain != DOMAIN_GPU);
		assert(!bo->needs_flush || use_active);
		ASSERT_MAYBE_IDLE(kgem, bo->handle, !use_active);
		return bo;
	}

	if (first) {
		assert(first->tiling == I915_TILING_NONE);

		if (use_active)
			kgem_bo_remove_from_active(kgem, first);
		else
			kgem_bo_remove_from_inactive(kgem, first);

		first->pitch = 0;
		first->delta = 0;
		DBG((" %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
		     __FUNCTION__, first->handle, num_pages(first),
		     use_active ? "active" : "inactive"));
		assert(list_is_empty(&first->list));
		assert(use_active || first->domain != DOMAIN_GPU);
		assert(!first->needs_flush || use_active);
		ASSERT_MAYBE_IDLE(kgem, first->handle, !use_active);
		return first;
	}

	return NULL;
}


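/* Create a linear bo of at least 'size' bytes, preferring to recycle
 * an idle bo from the cache; CREATE_CACHED means cache-or-nothing and
 * never allocates fresh storage. On LLC hardware a GTT-mapping request
 * is downgraded to a coherent CPU mapping.
 */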
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
{
	struct kgem_bo *bo;
	uint32_t handle;

	DBG(("%s(%d)\n", __FUNCTION__, size));

	if (flags & CREATE_GTT_MAP && kgem->has_llc) {
		flags &= ~CREATE_GTT_MAP;
		flags |= CREATE_CPU_MAP;
	}

	size = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags);
	if (bo) {
		assert(bo->domain != DOMAIN_GPU);
		ASSERT_IDLE(kgem, bo->handle);
		bo->refcnt = 1;
		return bo;
	}

	if (flags & CREATE_CACHED)
		return NULL;

	handle = gem_create(kgem->fd, size);
	if (handle == 0)
		return NULL;

	DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size));
	bo = __kgem_bo_alloc(handle, size);
	if (bo == NULL) {
		gem_close(kgem->fd, handle);
		return NULL;
	}

	debug_alloc__bo(kgem, bo);
	return bo;
}

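/* A fenced region on pre-gen4 hardware must be a power of two in size,
 * with a minimum of 512KiB on gen2 and 1MiB on gen3, so report the size
 * of the fence needed to cover this tiled bo.
 */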
inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
{
	unsigned int size;

	assert(bo->tiling);
	assert(kgem->gen < 040);

	if (kgem->gen < 030)
		size = 512 * 1024;
	else
		size = 1024 * 1024;
	while (size < bytes(bo))
		size *= 2;

	return size;
}

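/* Allocate a 2D bo for a width x height x bpp surface with the given
 * tiling; a negative tiling requests an exact match (CREATE_EXACT).
 * The caches are searched in order: the scanout list (CREATE_SCANOUT),
 * the large-bo lists (sizes beyond the bucketed caches), already
 * mapped inactive bos (CREATE_CPU_MAP/CREATE_GTT_MAP), the active
 * buckets, and the inactive buckets, before allocating fresh storage.
 *
 * An illustrative call (the flag choice here is only an example):
 *
 *	bo = kgem_create_2d(kgem, width, height, 32,
 *			    I915_TILING_X, CREATE_INACTIVE);
 */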
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags)
{
	struct list *cache;
	struct kgem_bo *bo;
	uint32_t pitch, untiled_pitch, tiled_height, size;
	uint32_t handle;
	int i, bucket, retry;

	if (tiling < 0)
		tiling = -tiling, flags |= CREATE_EXACT;

	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
	     width, height, bpp, tiling,
	     !!(flags & CREATE_EXACT),
	     !!(flags & CREATE_INACTIVE),
	     !!(flags & CREATE_CPU_MAP),
	     !!(flags & CREATE_GTT_MAP),
	     !!(flags & CREATE_SCANOUT),
	     !!(flags & CREATE_PRIME),
	     !!(flags & CREATE_TEMPORARY)));

	size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
				 width, height, bpp, tiling, &pitch);
	assert(size && size <= kgem->max_object_size);
	size /= PAGE_SIZE;
	bucket = cache_bucket(size);

	if (flags & CREATE_SCANOUT) {
		assert((flags & CREATE_INACTIVE) == 0);
		list_for_each_entry_reverse(bo, &kgem->scanout, list) {
			assert(bo->scanout);
			assert(bo->delta);
			assert(!bo->purged);

			if (size > num_pages(bo) || num_pages(bo) > 2*size)
				continue;

			if (bo->tiling != tiling ||
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
				if (!gem_set_tiling(kgem->fd, bo->handle,
						    tiling, pitch))
					continue;

				bo->tiling = tiling;
				bo->pitch = pitch;
			}

			list_del(&bo->list);

			bo->unique_id = kgem_get_unique_id(kgem);
			DBG((" 1:from scanout: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			bo->refcnt = 1;
			return bo;
		}
	}

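	/* Oversized buffers fall outside the bucketed caches and live on
	 * the separate large/large_inactive lists; they are also marked
	 * for automatic flushing when first allocated (see the create
	 * path below).
	 */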
	if (bucket >= NUM_CACHE_BUCKETS) {
		DBG(("%s: large bo num pages=%d, bucket=%d\n",
		     __FUNCTION__, size, bucket));

		if (flags & CREATE_INACTIVE)
			goto large_inactive;

		tiled_height = kgem_aligned_height(kgem, height, tiling);
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);

		list_for_each_entry(bo, &kgem->large, list) {
			assert(!bo->purged);
			assert(!bo->scanout);
			assert(bo->refcnt == 0);
			assert(bo->reusable);
			assert(bo->flush == true);

			if (kgem->gen < 040) {
				if (bo->pitch < pitch) {
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
					     bo->tiling, tiling,
					     bo->pitch, pitch));
					continue;
				}

				if (bo->pitch * tiled_height > bytes(bo))
					continue;
			} else {
				if (num_pages(bo) < size)
					continue;

				if (bo->pitch != pitch || bo->tiling != tiling) {
					if (!gem_set_tiling(kgem->fd, bo->handle,
							    tiling, pitch))
						continue;

					bo->pitch = pitch;
					bo->tiling = tiling;
				}
			}

			kgem_bo_remove_from_active(kgem, bo);

			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			bo->refcnt = 1;
			return bo;
		}

large_inactive:
		list_for_each_entry(bo, &kgem->large_inactive, list) {
			assert(bo->refcnt == 0);
			assert(bo->reusable);
			assert(!bo->scanout);

			if (size > num_pages(bo))
				continue;

			if (bo->tiling != tiling ||
			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
				if (!gem_set_tiling(kgem->fd, bo->handle,
						    tiling, pitch))
					continue;

				bo->tiling = tiling;
				bo->pitch = pitch;
			}

			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
				kgem_bo_free(kgem, bo);
				break;
			}

			list_del(&bo->list);

			bo->unique_id = kgem_get_unique_id(kgem);
			bo->pitch = pitch;
			bo->delta = 0;
			DBG((" 1:from large inactive: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			bo->refcnt = 1;
			return bo;
		}

		goto create;
	}

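	/* For a mapping request, prefer a bo that already carries a map
	 * of the right type: the vma[].inactive lists are keyed by map
	 * type (CPU vs GTT) and size bucket, so reusing one saves setting
	 * up a new mapping later.
	 */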
	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
		int for_cpu = !!(flags & CREATE_CPU_MAP);
		if (kgem->has_llc && tiling == I915_TILING_NONE)
			for_cpu = 1;
		/* We presume that we will need to upload to this bo,
		 * and so would prefer to have an active VMA.
		 */
		cache = &kgem->vma[for_cpu].inactive[bucket];
		do {
			list_for_each_entry(bo, cache, vma) {
				assert(bucket(bo) == bucket);
				assert(bo->refcnt == 0);
				assert(!bo->scanout);
				assert(bo->map);
				assert(IS_CPU_MAP(bo->map) == for_cpu);
				assert(bo->rq == NULL);
				assert(list_is_empty(&bo->request));
				assert(bo->flush == false);

				if (size > num_pages(bo)) {
					DBG(("inactive too small: %d < %d\n",
					     num_pages(bo), size));
					continue;
				}

				if (bo->tiling != tiling ||
				    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
					DBG(("inactive vma with wrong tiling: %d < %d\n",
					     bo->tiling, tiling));
					continue;
				}

				if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
					kgem_bo_free(kgem, bo);
					break;
				}

				bo->pitch = pitch;
				bo->delta = 0;
				bo->unique_id = kgem_get_unique_id(kgem);

				kgem_bo_remove_from_inactive(kgem, bo);

				DBG(("  from inactive vma: pitch=%d, tiling=%d: handle=%d, id=%d\n",
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
				assert(bo->reusable);
				assert(bo->domain != DOMAIN_GPU);
				ASSERT_IDLE(kgem, bo->handle);
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
				bo->refcnt = 1;
				return bo;
			}
		} while (!list_is_empty(cache) &&
			 __kgem_throttle_retire(kgem, flags));

		if (flags & CREATE_CPU_MAP && !kgem->has_llc)
			goto create;
	}

	if (flags & CREATE_INACTIVE)
		goto skip_active_search;

	/* Best active match */
	retry = NUM_CACHE_BUCKETS - bucket;
	if (retry > 3 && (flags & CREATE_TEMPORARY) == 0)
		retry = 3;
search_again:
	assert(bucket < NUM_CACHE_BUCKETS);
	cache = &kgem->active[bucket][tiling];
	if (tiling) {
		tiled_height = kgem_aligned_height(kgem, height, tiling);
		list_for_each_entry(bo, cache, list) {
			assert(!bo->purged);
			assert(bo->refcnt == 0);
			assert(bucket(bo) == bucket);
			assert(bo->reusable);
			assert(bo->tiling == tiling);
			assert(bo->flush == false);
			assert(!bo->scanout);

			if (kgem->gen < 040) {
				if (bo->pitch < pitch) {
					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
					     bo->tiling, tiling,
					     bo->pitch, pitch));
					continue;
				}

				if (bo->pitch * tiled_height > bytes(bo))
					continue;
			} else {
				if (num_pages(bo) < size)
					continue;

				if (bo->pitch != pitch) {
					if (!gem_set_tiling(kgem->fd,
							    bo->handle,
							    tiling, pitch))
						continue;

					bo->pitch = pitch;
				}
			}

			kgem_bo_remove_from_active(kgem, bo);

			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			bo->refcnt = 1;
			return bo;
		}
	} else {
		list_for_each_entry(bo, cache, list) {
			assert(bucket(bo) == bucket);
			assert(!bo->purged);
			assert(bo->refcnt == 0);
			assert(bo->reusable);
			assert(!bo->scanout);
			assert(bo->tiling == tiling);
			assert(bo->flush == false);

			if (num_pages(bo) < size)
				continue;

			kgem_bo_remove_from_active(kgem, bo);

			bo->pitch = pitch;
			bo->unique_id = kgem_get_unique_id(kgem);
			bo->delta = 0;
			DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
			bo->refcnt = 1;
			return bo;
		}
	}

	if (--retry && flags & CREATE_EXACT) {
		if (kgem->gen >= 040) {
			for (i = I915_TILING_NONE; i <= I915_TILING_Y; i++) {
				if (i == tiling)
					continue;

				cache = &kgem->active[bucket][i];
				list_for_each_entry(bo, cache, list) {
					assert(!bo->purged);
					assert(bo->refcnt == 0);
					assert(bo->reusable);
					assert(!bo->scanout);
					assert(bo->flush == false);

					if (num_pages(bo) < size)
						continue;

					if (!gem_set_tiling(kgem->fd,
							    bo->handle,
							    tiling, pitch))
						continue;

					kgem_bo_remove_from_active(kgem, bo);

					bo->unique_id = kgem_get_unique_id(kgem);
					bo->pitch = pitch;
					bo->tiling = tiling;
					bo->delta = 0;
					DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
					assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
					bo->refcnt = 1;
					return bo;
				}
			}
		}

		bucket++;
		goto search_again;
	}

	if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
		i = tiling;
		while (--i >= 0) {
			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
							 width, height, bpp, tiling, &pitch);
			cache = active(kgem, tiled_height / PAGE_SIZE, i);
			tiled_height = kgem_aligned_height(kgem, height, i);
			list_for_each_entry(bo, cache, list) {
				assert(!bo->purged);
				assert(bo->refcnt == 0);
				assert(bo->reusable);
				assert(!bo->scanout);
				assert(bo->flush == false);

				if (bo->tiling) {
					if (bo->pitch < pitch) {
						DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
						     bo->tiling, tiling,
						     bo->pitch, pitch));
						continue;
					}
				} else
					bo->pitch = untiled_pitch;

				if (bo->pitch * tiled_height > bytes(bo))
					continue;

				kgem_bo_remove_from_active(kgem, bo);

				bo->unique_id = kgem_get_unique_id(kgem);
				bo->delta = 0;
				DBG((" 1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
				bo->refcnt = 1;
				return bo;
			}
		}
	}

skip_active_search:
	bucket = cache_bucket(size);
	retry = NUM_CACHE_BUCKETS - bucket;
	if (retry > 3)
		retry = 3;
search_inactive:
	/* Now just look for a close match and prefer any currently active */
	assert(bucket < NUM_CACHE_BUCKETS);
	cache = &kgem->inactive[bucket];
	list_for_each_entry(bo, cache, list) {
		assert(bucket(bo) == bucket);
		assert(bo->reusable);
		assert(!bo->scanout);
		assert(bo->flush == false);

		if (size > num_pages(bo)) {
			DBG(("inactive too small: %d < %d\n",
			     num_pages(bo), size));
			continue;
		}

		if (bo->tiling != tiling ||
		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
			if (!gem_set_tiling(kgem->fd, bo->handle,
					    tiling, pitch))
				continue;

			if (bo->map)
				kgem_bo_release_map(kgem, bo);
		}

		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
			kgem_bo_free(kgem, bo);
			break;
		}

		kgem_bo_remove_from_inactive(kgem, bo);

		bo->pitch = pitch;
		bo->tiling = tiling;

		bo->delta = 0;
		bo->unique_id = kgem_get_unique_id(kgem);
		assert(bo->pitch);
		DBG(("  from inactive: pitch=%d, tiling=%d: handle=%d, id=%d\n",
		     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
		assert(bo->refcnt == 0);
		assert(bo->reusable);
		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
		ASSERT_MAYBE_IDLE(kgem, bo->handle, flags & CREATE_INACTIVE);
		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
		bo->refcnt = 1;
		return bo;
	}

	if (flags & CREATE_INACTIVE &&
	    !list_is_empty(&kgem->active[bucket][tiling]) &&
	    __kgem_throttle_retire(kgem, flags)) {
		flags &= ~CREATE_INACTIVE;
		goto search_inactive;
	}

	if (--retry) {
		bucket++;
		flags &= ~CREATE_INACTIVE;
		goto search_inactive;
	}

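	/* Every cache missed: allocate fresh storage. A bo too large for
	 * the bucketed caches is rounded up to a 1024-page multiple and
	 * marked for automatic flushing.
	 */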
create:
	if (bucket >= NUM_CACHE_BUCKETS)
		size = ALIGN(size, 1024);
	handle = gem_create(kgem->fd, size);
	if (handle == 0)
		return NULL;

	bo = __kgem_bo_alloc(handle, size);
	if (!bo) {
		gem_close(kgem->fd, handle);
		return NULL;
	}

	bo->domain = DOMAIN_CPU;
	bo->unique_id = kgem_get_unique_id(kgem);
	bo->pitch = pitch;
	if (tiling != I915_TILING_NONE &&
	    gem_set_tiling(kgem->fd, handle, tiling, pitch))
		bo->tiling = tiling;
	if (bucket >= NUM_CACHE_BUCKETS) {
		DBG(("%s: marking large bo for automatic flushing\n",
		     __FUNCTION__));
		bo->flush = true;
	}

	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));

	debug_alloc__bo(kgem, bo);

	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
	     size, num_pages(bo), bucket(bo)));
	return bo;
}

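/* kgem_create_cpu_2d below is currently compiled out: it allocates a
 * CPU-mapped 2D bo, preferring in turn an LLC CPU mapping, the snoop
 * cache, snooped (gem_set_cacheing) storage, and finally a userptr
 * wrapping of posix_memalign memory.
 */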
3543 | #if 0 |
3543 | #if 0 |
3544 | struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem, |
3544 | struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem, |
3545 | int width, |
3545 | int width, |
3546 | int height, |
3546 | int height, |
3547 | int bpp, |
3547 | int bpp, |
3548 | uint32_t flags) |
3548 | uint32_t flags) |
3549 | { |
3549 | { |
3550 | struct kgem_bo *bo; |
3550 | struct kgem_bo *bo; |
3551 | int stride, size; |
3551 | int stride, size; |

	if (DBG_NO_CPU)
		return NULL;

	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));

	if (kgem->has_llc) {
		bo = kgem_create_2d(kgem, width, height, bpp,
				    I915_TILING_NONE, flags);
		if (bo == NULL)
			return bo;

		assert(bo->tiling == I915_TILING_NONE);

		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}

		return bo;
	}

	assert(width > 0 && height > 0);
	stride = ALIGN(width, 2) * bpp >> 3;
	stride = ALIGN(stride, 4);
	size = stride * ALIGN(height, 2);
	assert(size >= PAGE_SIZE);

	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
	     __FUNCTION__, width, height, bpp, stride));

	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
	if (bo) {
		assert(bo->tiling == I915_TILING_NONE);
		assert(bo->snoop);
		bo->refcnt = 1;
		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	if (kgem->has_cacheing) {
		bo = kgem_create_linear(kgem, size, flags);
		if (bo == NULL)
			return NULL;

		assert(bo->tiling == I915_TILING_NONE);

		if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED)) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}
		bo->snoop = true;

		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
			kgem_bo_destroy(kgem, bo);
			return NULL;
		}

		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	if (kgem->has_userptr) {
		void *ptr;

		/* XXX */
		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
			return NULL;

		bo = kgem_create_map(kgem, ptr, size, false);
		if (bo == NULL) {
			free(ptr);
			return NULL;
		}

		bo->map = MAKE_USER_MAP(ptr);
		bo->pitch = stride;
		bo->unique_id = kgem_get_unique_id(kgem);
		return bo;
	}

	return NULL;
}

#endif
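
/* The allocator above tries, in order: an LLC-coherent bo from
 * kgem_create_2d(), a recycled snooped bo from search_snoop_cache(),
 * a fresh linear bo made snooped via gem_set_cacheing(), and finally a
 * userptr bo wrapping anonymous memory; every successful path yields an
 * untiled, CPU-mappable bo with pitch and unique_id filled in.
 */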

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, proxy? %d\n",
	     __FUNCTION__, bo->handle, bo->proxy != NULL));

	if (bo->proxy) {
		_list_del(&bo->vma);
		_list_del(&bo->request);
		if (bo->io && bo->exec == NULL)
			_kgem_bo_delete_buffer(kgem, bo);
		kgem_bo_unref(kgem, bo->proxy);
		kgem_bo_binding_free(kgem, bo);
		free(bo);
		return;
	}

	__kgem_bo_destroy(kgem, bo);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->rq);
	assert(bo->exec == NULL);
	assert(bo->needs_flush);

	/* The kernel will emit a flush *and* update its own flushing lists. */
	if (!__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	DBG(("%s: handle=%d, busy?=%d\n",
	     __FUNCTION__, bo->handle, bo->rq != NULL));
}

inline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
{
	return kgem->nreloc && bo->rq && RQ_RING(bo->rq) != kgem->ring;
}

bool kgem_check_bo(struct kgem *kgem, ...)
{
	va_list ap;
	struct kgem_bo *bo;
	int num_exec = 0;
	int num_pages = 0;
	bool flush = false;

	va_start(ap, kgem);
	while ((bo = va_arg(ap, struct kgem_bo *))) {
		while (bo->proxy)
			bo = bo->proxy;
		if (bo->exec)
			continue;

		if (needs_semaphore(kgem, bo))
			return false;

		num_pages += num_pages(bo);
		num_exec++;

		flush |= bo->flush;
	}
	va_end(ap);

	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
	     __FUNCTION__, num_pages, num_exec));

	if (!num_pages)
		return true;

	if (kgem_flush(kgem, flush))
		return false;

	if (kgem->aperture > kgem->aperture_low &&
	    kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: current aperture usage (%d) is greater than low water mark (%d)\n",
		     __FUNCTION__, kgem->aperture, kgem->aperture_low));
		return false;
	}

	if (num_pages + kgem->aperture > kgem->aperture_high) {
		DBG(("%s: final aperture usage (%d) is greater than high water mark (%d)\n",
		     __FUNCTION__, num_pages + kgem->aperture, kgem->aperture_high));
		return false;
	}

	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) {
		DBG(("%s: out of exec slots (%d + %d / %d)\n", __FUNCTION__,
		     kgem->nexec, num_exec, KGEM_EXEC_SIZE(kgem)));
		return false;
	}

	return true;
}
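
/* Usage sketch (illustrative; dst_bo/src_bo are assumed caller names,
 * not taken from this file): before emitting commands that reference a
 * set of bos, a caller verifies they all fit in the current batch, and
 * submits the batch to start afresh when they do not. The varargs list
 * is NULL-terminated.
 *
 *	if (!kgem_check_bo(kgem, dst_bo, src_bo, NULL)) {
 *		_kgem_submit(kgem);
 *		if (!kgem_check_bo(kgem, dst_bo, src_bo, NULL))
 *			return false;
 *	}
 */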

uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domain,
			uint32_t delta)
{
	int index;

	DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n",
	     __FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain));

	assert((read_write_domain & 0x7fff) == 0 || bo != NULL);

	/* Fake bos (handle == -2, e.g. the scanout wrapped by
	 * kgem_init_fb() below) have no kernel object to relocate
	 * against: track them in the batch and mark them dirty, but
	 * emit no relocation entry.
	 */
	if (bo != NULL && bo->handle == -2) {
		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);

		if (read_write_domain & 0x7fff && !bo->dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}
		return 0;
	}

	index = kgem->nreloc++;
	assert(index < ARRAY_SIZE(kgem->reloc));
	kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]);
	if (bo) {
		assert(bo->refcnt);
		assert(!bo->purged);

		while (bo->proxy) {
			DBG(("%s: adding proxy [delta=%d] for handle=%d\n",
			     __FUNCTION__, bo->delta, bo->handle));
			delta += bo->delta;
			assert(bo->handle == bo->proxy->handle);
			/* need to release the cache upon batch submit */
			if (bo->exec == NULL) {
				list_move_tail(&bo->request,
					       &kgem->next_request->buffers);
				bo->rq = MAKE_REQUEST(kgem->next_request,
						      kgem->ring);
				bo->exec = &_kgem_dummy_exec;
			}

			if (read_write_domain & 0x7fff && !bo->dirty)
				__kgem_bo_mark_dirty(bo);

			bo = bo->proxy;
			assert(bo->refcnt);
			assert(!bo->purged);
		}

		if (bo->exec == NULL)
			kgem_add_bo(kgem, bo);
		assert(bo->rq == MAKE_REQUEST(kgem->next_request, kgem->ring));
		assert(RQ_RING(bo->rq) == kgem->ring);

		if (kgem->gen < 040 && read_write_domain & KGEM_RELOC_FENCED) {
			if (bo->tiling &&
			    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
				assert(kgem->nfence < kgem->fence_max);
				kgem->aperture_fenced +=
					kgem_bo_fenced_size(kgem, bo);
				kgem->nfence++;
			}
			bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
		}

		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = bo->target_handle;
		kgem->reloc[index].presumed_offset = bo->presumed_offset;

		if (read_write_domain & 0x7fff && !bo->dirty) {
			assert(!bo->snoop || kgem->can_blt_cpu);
			__kgem_bo_mark_dirty(bo);
		}

		delta += bo->presumed_offset;
	} else {
		kgem->reloc[index].delta = delta;
		kgem->reloc[index].target_handle = ~0U;
		kgem->reloc[index].presumed_offset = 0;
		if (kgem->nreloc__self < 256)
			kgem->reloc__self[kgem->nreloc__self++] = index;
	}
	kgem->reloc[index].read_domains = read_write_domain >> 16;
	kgem->reloc[index].write_domain = read_write_domain & 0x7fff;

	return delta;
}
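
/* Usage sketch (illustrative): a batch dword that the GPU will read as
 * an address is written through kgem_add_reloc(); read domains live in
 * the high 16 bits of read_write_domain, the write domain in the low
 * bits, matching the decode at the end of the function above.
 *
 *	uint32_t *b = kgem->batch + kgem->nbatch;
 *	b[0] = cmd;
 *	...
 *	b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, bo,
 *			      I915_GEM_DOMAIN_RENDER << 16 |
 *			      I915_GEM_DOMAIN_RENDER,
 *			      0);
 */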

static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
{
	int i, j;

	DBG(("%s: type=%d, count=%d (bucket: %d)\n",
	     __FUNCTION__, type, kgem->vma[type].count, bucket));
	if (kgem->vma[type].count <= 0)
		return;

	if (kgem->need_purge)
		kgem_purge_cache(kgem);

	/* vma are limited on a per-process basis to around 64k.
	 * This includes all malloc arenas as well as other file
	 * mappings. In order to be fair and not hog the cache,
	 * and more importantly not to exhaust that limit and to
	 * start failing mappings, we keep our own number of open
	 * vma to within a conservative value.
	 */
	i = 0;
	while (kgem->vma[type].count > 0) {
		struct kgem_bo *bo = NULL;

		for (j = 0;
		     bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive);
		     j++) {
			struct list *head = &kgem->vma[type].inactive[i++ % ARRAY_SIZE(kgem->vma[type].inactive)];
			if (!list_is_empty(head))
				bo = list_last_entry(head, struct kgem_bo, vma);
		}
		if (bo == NULL)
			break;

		DBG(("%s: discarding inactive %s vma cache for %d\n",
		     __FUNCTION__,
		     IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle));
		assert(IS_CPU_MAP(bo->map) == type);
		assert(bo->map);
		assert(bo->rq == NULL);

		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
//		munmap(MAP(bo->map), bytes(bo));
		bo->map = NULL;
		list_del(&bo->vma);
		kgem->vma[type].count--;

		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo)) {
			DBG(("%s: freeing unpurgeable old mapping\n",
			     __FUNCTION__));
			kgem_bo_free(kgem, bo);
		}
	}
}

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->proxy == NULL);
	assert(list_is_empty(&bo->list));
	assert(bo->exec == NULL);

	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
		DBG(("%s: converting request for GTT map into CPU map\n",
		     __FUNCTION__));
		ptr = kgem_bo_map__cpu(kgem, bo);
		kgem_bo_sync__cpu(kgem, bo);
		return ptr;
	}

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
		assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	if (bo->domain != DOMAIN_GTT) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		/* XXX use PROT_READ to avoid the write flush? */

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_GTT;
		}
	}

	return ptr;
}
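
/* Usage sketch (illustrative; dst/src/cpp are assumed caller names):
 * kgem_bo_map() hands back either a cached CPU mapping (on LLC or
 * CPU-domain untiled bos) or a write-combining GTT mapping, in both
 * cases already synchronised for immediate access.
 *
 *	uint8_t *dst = kgem_bo_map(kgem, bo);
 *	if (dst)
 *		memcpy(dst + y * bo->pitch + x * cpp, src, length);
 */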

void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
	void *ptr;

	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));

	assert(!bo->purged);
	assert(bo->exec == NULL);
	assert(list_is_empty(&bo->list));

	if (IS_CPU_MAP(bo->map))
		kgem_bo_release_map(kgem, bo);

	ptr = bo->map;
	if (ptr == NULL) {
		assert(bytes(bo) <= kgem->aperture_mappable / 4);

		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));

		ptr = __kgem_bo_map__gtt(kgem, bo);
		if (ptr == NULL)
			return NULL;

		/* Cache this mapping to avoid the overhead of an
		 * excruciatingly slow GTT pagefault. This is more an
		 * issue with compositing managers which need to frequently
		 * flush CPU damage to their GPU bo.
		 */
		bo->map = ptr;
		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
	}

	return ptr;
}
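
/* Unlike kgem_bo_map(), the __gtt variant above only creates (or
 * reuses) the GTT mapping; it performs no set-domain ioctl, so the
 * caller is responsible for any synchronisation against outstanding
 * GPU access.
 */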

void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	struct drm_i915_gem_mmap mmap_arg;

	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
	assert(!bo->purged);
	assert(list_is_empty(&bo->list));
	assert(!bo->scanout);
	assert(bo->proxy == NULL);

	if (IS_CPU_MAP(bo->map))
		return MAP(bo->map);

	if (bo->map)
		kgem_bo_release_map(kgem, bo);

	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));

retry:
	VG_CLEAR(mmap_arg);
	mmap_arg.handle = bo->handle;
	mmap_arg.offset = 0;
	mmap_arg.size = bytes(bo);
	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
		printf("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
		       __FUNCTION__, bo->handle, bytes(bo), 0);
		if (__kgem_throttle_retire(kgem, 0))
			goto retry;

		if (kgem->need_expire) {
			kgem_cleanup_cache(kgem);
			goto retry;
		}

		return NULL;
	}

	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));

	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}
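
/* Usage sketch (illustrative): a CPU mmap is paired with
 * kgem_bo_sync__cpu() so the kernel moves the bo into the CPU domain
 * before the pointer is dereferenced, exactly as kgem_bo_map() does on
 * its LLC fast path above.
 *
 *	void *ptr = kgem_bo_map__cpu(kgem, bo);
 *	if (ptr) {
 *		kgem_bo_sync__cpu(kgem, bo);
 *		memset(ptr, 0, bytes(bo));
 *	}
 */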

void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	kgem_bo_submit(kgem, bo);

	if (bo->domain != DOMAIN_CPU) {
		struct drm_i915_gem_set_domain set_domain;

		DBG(("%s: SYNC: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
		     bo->needs_flush, bo->domain, __kgem_busy(kgem, bo->handle)));

		VG_CLEAR(set_domain);
		set_domain.handle = bo->handle;
		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;

		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
			kgem_bo_retire(kgem, bo);
			bo->domain = DOMAIN_CPU;
		}
	}
}

void kgem_clear_dirty(struct kgem *kgem)
{
	struct list * const buffers = &kgem->next_request->buffers;
	struct kgem_bo *bo;

	/* Dirty bos are kept at the head of the request's buffer list
	 * (see __kgem_bo_mark_dirty()), so the walk can stop at the
	 * first clean bo.
	 */
	list_for_each_entry(bo, buffers, request) {
		if (!bo->dirty)
			break;

		bo->dirty = false;
	}
}

struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length)
{
	struct kgem_bo *bo;

	DBG(("%s: target handle=%d [proxy? %d], offset=%d, length=%d, io=%d\n",
	     __FUNCTION__, target->handle, target->proxy ? target->proxy->delta : -1,
	     offset, length, target->io));

	bo = __kgem_bo_alloc(target->handle, length);
	if (bo == NULL)
		return NULL;

	bo->unique_id = kgem_get_unique_id(kgem);
	bo->reusable = false;
	bo->size.bytes = length;

	bo->io = target->io && target->proxy == NULL;
	bo->dirty = target->dirty;
	bo->tiling = target->tiling;
	bo->pitch = target->pitch;

	assert(!bo->scanout);
	bo->proxy = kgem_bo_reference(target);
	bo->delta = offset;

	if (target->exec) {
		list_move_tail(&bo->request, &kgem->next_request->buffers);
		bo->exec = &_kgem_dummy_exec;
	}
	bo->rq = target->rq;

	return bo;
}
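
/* Usage sketch (illustrative; buffer_bo/offset/size/stride are assumed
 * caller names): a proxy is a byte-range view into an existing bo,
 * e.g. one slot inside a shared upload buffer; it shares the target's
 * handle, inherits its tiling and pitch, and keeps a reference on the
 * target. The delta it carries is folded into relocations by
 * kgem_add_reloc() above.
 *
 *	struct kgem_bo *view = kgem_create_proxy(kgem, buffer_bo,
 *						 offset, size);
 *	if (view)
 *		view->pitch = stride;	// caller-chosen row stride
 */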

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b && b->offset; b = b->next)
		if (format == b->format)
			return b->offset;

	return 0;
}

void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
{
	struct kgem_bo_binding *b;

	for (b = &bo->binding; b; b = b->next) {
		if (b->offset)
			continue;

		b->offset = offset;
		b->format = format;

		if (b->next)
			b->next->offset = 0;

		return;
	}

	b = malloc(sizeof(*b));
	if (b) {
		b->next = bo->binding.next;
		b->format = format;
		b->offset = offset;
		bo->binding.next = b;
	}
}
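
/* Usage sketch (illustrative; emit_surface_state() is a hypothetical
 * helper, not from this file): the binding cache memoises the state
 * offset last used for a (bo, format) pair, so repeated lookups can
 * skip re-emitting the state.
 *
 *	offset = kgem_bo_get_binding(bo, format);
 *	if (offset == 0) {
 *		offset = emit_surface_state(sna, bo, format);
 *		kgem_bo_set_binding(bo, format, offset);
 *	}
 */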

int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb)
{
	struct kgem_bo *bo;
	size_t size;
	int ret;

	ret = drmIoctl(kgem->fd, SRV_FBINFO, fb);
	if (ret != 0)
		return 0;

	/* size in pages, as __kgem_bo_alloc() expects */
	size = fb->pitch * fb->height / PAGE_SIZE;

	bo = __kgem_bo_alloc(-2, size);
	if (!bo)
		return 0;

	bo->domain = DOMAIN_GTT;
	bo->unique_id = kgem_get_unique_id(kgem);
	bo->pitch = fb->pitch;
	bo->tiling = I915_TILING_NONE;
	bo->scanout = 1;
	fb->fb_bo = bo;

	printf("fb width %d height %d pitch %d bo %p\n",
	       fb->width, fb->height, fb->pitch, fb->fb_bo);

	return 1;
}
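
/* Usage sketch (illustrative): kgem_init_fb() fills the sna_fb from the
 * SRV_FBINFO ioctl and wraps the system framebuffer in a fake scanout
 * bo (handle == -2, handled specially by kgem_add_reloc() above); a
 * caller might keep the result as its front buffer.
 *
 *	struct sna_fb fb;
 *	if (kgem_init_fb(kgem, &fb))
 *		front_bo = fb.fb_bo;	// hypothetical consumer
 */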