/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson
 *
 */

#ifndef KGEM_H
#define KGEM_H

#define HAS_DEBUG_FULL 0

#include <stdint.h>
#include <stdbool.h>
#include <stdarg.h>
#include <stdio.h>

#include <i915_drm.h>

#include "compiler.h"
#include "intel_list.h"

/* KolibriOS system call 5: delay execution for 'time' hundredths of a second */
static inline void delay(uint32_t time)
{
        __asm__ __volatile__(
        "int $0x40"
        ::"a"(5), "b"(time)
        :"memory");
}

#undef DBG

#if HAS_DEBUG_FULL
#define DBG(x) printf x
#else
#define DBG(x)
#endif

struct kgem_bo {
        struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
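/* struct kgem_request allocations are at least 4-byte aligned, so the low
 * two bits of bo->rq are always zero and can carry the ring the request
 * was queued on: RQ() masks the tag off to recover the pointer, RQ_RING()
 * extracts it, and kgem_bo_mark_busy() below ORs it back in. The same
 * low-bit trick tags bo->map as a CPU or GTT mapping (IS_CPU_MAP below).
 */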
        struct drm_i915_gem_exec_object2 *exec;

        struct kgem_bo *proxy;

        struct list list;
        struct list request;
        struct list vma;

        void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)

        struct kgem_bo_binding {
                struct kgem_bo_binding *next;
                uint32_t format;
                uint16_t offset;
        } binding;

        uint32_t unique_id;
        uint32_t refcnt;
        uint32_t handle;
        uint32_t target_handle;
        uint32_t presumed_offset;
        uint32_t delta;
        union {
                struct {
                        uint32_t count:27;
#define PAGE_SIZE 4096
                        uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
                } pages;
                uint32_t bytes;
        } size;
        uint32_t pitch : 18; /* max 128k */
        uint32_t tiling : 2;
        uint32_t reusable : 1;
        uint32_t dirty : 1;
        uint32_t domain : 2;
        uint32_t needs_flush : 1;
        uint32_t snoop : 1;
        uint32_t io : 1;
        uint32_t flush : 1;
        uint32_t scanout : 1;
        uint32_t purged : 1;
};
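/* bo->size is interpreted by role: a proxy (a sub-allocation of another
 * buffer) records its exact length in size.bytes, while a regular bo
 * records a page count and the cache bucket it belongs to. With 4KiB pages
 * the largest bucketed object is MAX_CACHE_SIZE = 1 << (16+12) = 256MiB;
 * larger objects live on the dedicated 'large' lists in struct kgem.
 */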
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3

struct kgem_request {
        struct list list;
        struct kgem_bo *bo;
        struct list buffers;
        int ring;
};

enum {
        MAP_GTT = 0,
        MAP_CPU,
        NUM_MAP_TYPES,
};

struct kgem {
        int fd;
        int wedged;
        unsigned gen;

        uint32_t unique_id;

        enum kgem_mode {
                /* order matches I915_EXEC_RING ordering */
                KGEM_NONE = 0,
                KGEM_RENDER,
                KGEM_BSD,
                KGEM_BLT,
        } mode, ring;

        struct list flushing;
        struct list large;
        struct list large_inactive;
        struct list active[NUM_CACHE_BUCKETS][3];
        struct list inactive[NUM_CACHE_BUCKETS];
        struct list pinned_batches[2];
        struct list snoop;
        struct list scanout;
        struct list batch_buffers, active_buffers;

        struct list requests[2];
        struct kgem_request *next_request;
        struct kgem_request static_request;

        struct {
                struct list inactive[NUM_CACHE_BUCKETS];
                int16_t count;
        } vma[NUM_MAP_TYPES];

        uint32_t batch_flags;
        uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

        uint16_t nbatch;
        uint16_t surface;
        uint16_t nexec;
        uint16_t nreloc;
        uint16_t nreloc__self;
        uint16_t nfence;
        uint16_t batch_size;
        uint16_t min_alignment;

        uint32_t flush:1;
        uint32_t need_expire:1;
        uint32_t need_purge:1;
        uint32_t need_retire:1;
        uint32_t need_throttle:1;
        uint32_t scanout_busy:1;
        uint32_t busy:1;

        uint32_t has_userptr :1;
        uint32_t has_blt :1;
        uint32_t has_relaxed_fencing :1;
        uint32_t has_relaxed_delta :1;
        uint32_t has_semaphores :1;
        uint32_t has_secure_batches :1;
        uint32_t has_pinned_batches :1;
        uint32_t has_cacheing :1;
        uint32_t has_llc :1;
        uint32_t has_no_reloc :1;
        uint32_t has_handle_lut :1;

        uint32_t can_blt_cpu :1;

        uint16_t fence_max;
        uint16_t half_cpu_cache_pages;
        uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
        uint32_t aperture, aperture_fenced;
        uint32_t max_upload_tile_size, max_copy_tile_size;
        uint32_t max_gpu_size, max_cpu_size;
        uint32_t large_object_size, max_object_size;
        uint32_t buffer_size;

        void (*context_switch)(struct kgem *kgem, int new_mode);
        void (*retire)(struct kgem *kgem);
        void (*expire)(struct kgem *kgem);

        uint32_t batch[64*1024-8];
        struct drm_i915_gem_exec_object2 exec[256];
        struct drm_i915_gem_relocation_entry reloc[4096];
        uint16_t reloc__self[256];

#ifdef DEBUG_MEMORY
        struct {
                int bo_allocs;
                size_t bo_bytes;
        } debug_memory;
#endif
};

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED 4
#define KGEM_EXEC_RESERVED 1

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif

#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
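/* The *_RESERVED slots keep headroom at the tail of each array so that a
 * batch which already passed kgem_check_*() can always be closed: one spare
 * batch dword is kept for the MI_BATCH_BUFFER_END terminator, and the spare
 * exec and reloc entries cover bookkeeping added at submission time.
 */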

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
                                void *ptr, uint32_t size,
                                bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
                                  struct kgem_bo *target,
                                  int offset, int length);

int kgem_choose_tiling(struct kgem *kgem,
                       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU 0x1
#define KGEM_CAN_CREATE_CPU 0x2
#define KGEM_CAN_CREATE_LARGE 0x4
#define KGEM_CAN_CREATE_GTT 0x8

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
                struct kgem_bo *src,
                uint32_t width,
                uint32_t height,
                uint32_t pitch,
                uint32_t bpp);
enum {
        CREATE_EXACT = 0x1,
        CREATE_INACTIVE = 0x2,
        CREATE_CPU_MAP = 0x4,
        CREATE_GTT_MAP = 0x8,
        CREATE_SCANOUT = 0x10,
        CREATE_PRIME = 0x20,
        CREATE_TEMPORARY = 0x40,
        CREATE_CACHED = 0x80,
        CREATE_NO_RETIRE = 0x100,
        CREATE_NO_THROTTLE = 0x200,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
                               int width,
                               int height,
                               int bpp,
                               int tiling,
                               uint32_t flags);
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
                                   int width,
                                   int height,
                                   int bpp,
                                   uint32_t flags);

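/* Illustrative allocation of a tiled 2D target (a sketch; the sizes and
 * flags here are hypothetical, not taken from this header's callers):
 *
 *     struct kgem_bo *bo =
 *             kgem_create_2d(kgem, 1024, 768, 32,
 *                            kgem_choose_tiling(kgem, I915_TILING_X,
 *                                               1024, 768, 32),
 *                            CREATE_INACTIVE);
 *     if (bo == NULL)
 *             return false;
 */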
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
int kgem_bo_get_swizzling(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_retire(struct kgem *kgem);

bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
{
        ring = ring == KGEM_BLT;

        if (list_is_empty(&kgem->requests[ring]))
                return true;

        return __kgem_ring_is_idle(kgem, ring);
}
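/* Only two request lists exist: kgem->requests[1] for the blitter and
 * kgem->requests[0] for everything else (render/BSD), hence the collapse
 * of 'ring' to the boolean 'ring == KGEM_BLT' above. An empty list means
 * no request is outstanding on that ring, so the kernel need not be asked.
 */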

static inline bool kgem_is_idle(struct kgem *kgem)
{
        if (!kgem->need_retire)
                return true;

        return kgem_ring_is_idle(kgem, kgem->ring);
}

void _kgem_submit(struct kgem *kgem);
static inline void kgem_submit(struct kgem *kgem)
{
        if (kgem->nbatch)
                _kgem_submit(kgem);
}

static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
        if (kgem->nreloc == 0)
                return false;

        return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}

static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
        if (bo->exec)
                _kgem_submit(kgem);
}

void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
        kgem_bo_submit(kgem, bo);

        if (!bo->needs_flush)
                return;

        /* If the kernel fails to emit the flush, then it will be forced when
         * we assume direct access. And as the usual failure is EIO, we do
         * not actually care.
         */
        __kgem_flush(kgem, bo);
}

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
        assert(bo->refcnt);
        bo->refcnt++;
        return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
        assert(bo->refcnt);
        if (--bo->refcnt == 0)
                _kgem_bo_destroy(kgem, bo);
}
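/* Note that dropping the final reference does not necessarily free the
 * object: _kgem_bo_destroy() may retire a reusable bo to the active or
 * inactive cache lists in struct kgem so a later allocation from the same
 * size bucket can recycle it without another kernel round trip.
 */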

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem,
                                 enum kgem_mode mode,
                                 struct kgem_bo *bo)
{
        assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
        kgem_submit(kgem);
#endif

        if (kgem->mode == mode)
                return;

//        kgem->context_switch(kgem, mode);
        kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
        assert(kgem->mode == KGEM_NONE);
        assert(kgem->nbatch == 0);
        assert(!kgem->wedged);
//        kgem->context_switch(kgem, mode);
        kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
        assert(num_dwords > 0);
        assert(kgem->nbatch < kgem->surface);
        assert(kgem->surface <= kgem->batch_size);
        return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

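/* Typical emission pattern: check for space, flush if the batch is full,
 * then write the dwords. A sketch with a placeholder command (CMD and the
 * argument dwords are hypothetical):
 *
 *     uint32_t *b;
 *     if (!kgem_check_batch(kgem, 3))
 *             _kgem_submit(kgem);
 *     b = kgem_get_batch(kgem);
 *     b[0] = CMD;
 *     b[1] = arg0;
 *     b[2] = arg1;
 *     kgem->nbatch += 3;
 */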
static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
        assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
        return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
        assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
        return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
        return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
                                                  int num_dwords,
                                                  int num_surfaces)
{
        return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
                kgem_check_reloc(kgem, num_surfaces) &&
                kgem_check_exec(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
        return kgem->batch + kgem->nbatch;
}

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
                        uint32_t pos,
                        struct kgem_bo *bo,
                        uint32_t read_write_domains,
                        uint32_t delta);

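/* kgem_add_reloc() records a relocation for the dword at batch position
 * 'pos' and returns the value to write there (the target's presumed offset
 * plus delta). In the sna convention the read domains sit in the upper half
 * of read_write_domains and the write domain in the lower half; a blit
 * destination would be added roughly as:
 *
 *     b[0] = kgem_add_reloc(kgem, kgem->nbatch,
 *                           dst_bo,
 *                           I915_GEM_DOMAIN_RENDER << 16 |
 *                           I915_GEM_DOMAIN_RENDER |
 *                           KGEM_RELOC_FENCED,
 *                           0);
 *
 * (A sketch of the usual calling pattern, not an exact quote of a caller.)
 */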
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
                   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
                        int *tile_width, int *tile_height, int *tile_size);

static inline int __kgem_buffer_size(struct kgem_bo *bo)
{
        assert(bo->proxy != NULL);
        return bo->size.bytes;
}

static inline int __kgem_bo_size(struct kgem_bo *bo)
{
        assert(bo->proxy == NULL);
        return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_bo_size(struct kgem_bo *bo)
{
        if (bo->proxy)
                return __kgem_buffer_size(bo);
        else
                return __kgem_bo_size(bo);
}

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
                                           struct kgem_bo *bo)
{
        int pitch = bo->pitch;
        if (kgem->gen >= 040 && bo->tiling)
                pitch /= 4;
        if (pitch > MAXSHORT) {
                DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
                     __FUNCTION__, bo->handle, pitch));
                return false;
        }

        return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
                                   struct kgem_bo *bo)
{
        if (bo->tiling == I915_TILING_Y) {
                DBG(("%s: can not blt to handle=%d, tiling=Y\n",
                     __FUNCTION__, bo->handle));
                return false;
        }

        return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/

static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
                                         struct kgem_bo *bo)
{
        if (bo->domain == DOMAIN_GTT)
                return true;

        if (kgem->gen < 040 && bo->tiling &&
            bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
                return false;

        if (!bo->presumed_offset)
                return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

        return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_is_mappable(struct kgem *kgem,
                                       struct kgem_bo *bo)
{
        DBG(("%s: domain=%d, offset: %d size: %d\n",
             __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
        assert(bo->refcnt);
        return __kgem_bo_is_mappable(kgem, bo);
}

static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
        DBG(("%s: map=%p, tiling=%d, domain=%d\n",
             __FUNCTION__, bo->map, bo->tiling, bo->domain));
        assert(bo->refcnt);

        if (bo->map == NULL)
                return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;

        return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
        if (kgem_bo_mapped(kgem, bo))
                return true;

        if (!bo->tiling && kgem->has_llc)
                return true;

        if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
                return false;

        return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
}

static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
{
        assert(bo->refcnt);
        while (bo->proxy)
                bo = bo->proxy;
        return bo->snoop;
}

bool __kgem_busy(struct kgem *kgem, int handle);

static inline void kgem_bo_mark_busy(struct kgem_bo *bo, int ring)
{
        bo->rq = (struct kgem_request *)((uintptr_t)bo->rq | ring);
}

inline static void __kgem_bo_clear_busy(struct kgem_bo *bo)
{
        bo->needs_flush = false;
        list_del(&bo->request);
        bo->rq = NULL;
        bo->domain = DOMAIN_NONE;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
        DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
             bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
        assert(bo->refcnt);
        return bo->rq;
}

/*

static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
        DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
             bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
        assert(bo->refcnt);

        if (bo->exec)
                return true;

        if (kgem_flush(kgem, bo->flush))
                kgem_submit(kgem);

        if (bo->rq && !__kgem_busy(kgem, bo->handle))
                __kgem_bo_clear_busy(bo);

        return kgem_bo_is_busy(bo);
}

*/

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
        if (bo == NULL)
                return false;

        assert(bo->refcnt);
        return bo->dirty;
}

static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
        /* The bo is outside of our control, so presume it is written to */
        bo->needs_flush = true;
        if (bo->rq == NULL)
                bo->rq = (void *)kgem;

        if (bo->domain != DOMAIN_GPU)
                bo->domain = DOMAIN_NONE;
}

static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
        DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
             bo->handle, bo->proxy != NULL));

        bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
        bo->needs_flush = bo->dirty = true;
        list_move(&bo->request, &RQ(bo->rq)->buffers);
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
        assert(bo->refcnt);
        do {
                assert(bo->exec);
                assert(bo->rq);

                if (bo->dirty)
                        return;

                __kgem_bo_mark_dirty(bo);
        } while ((bo = bo->proxy));
}

#define KGEM_BUFFER_WRITE 0x1
#define KGEM_BUFFER_INPLACE 0x2
#define KGEM_BUFFER_LAST 0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
                                   uint32_t size, uint32_t flags,
                                   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
                                      int width, int height, int bpp,
                                      uint32_t flags,
                                      void **ret);
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
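/* kgem_create_buffer() and kgem_create_buffer_2d() return a staging bo and
 * store a CPU pointer to its contents in *ret. A hypothetical upload path
 * (names and sizes are illustrative only):
 *
 *     void *ptr;
 *     struct kgem_bo *bo =
 *             kgem_create_buffer_2d(kgem, width, height, 32,
 *                                   KGEM_BUFFER_WRITE_INPLACE, &ptr);
 *     if (bo != NULL) {
 *             memcpy(ptr, src_pixels, height * stride); // fill staging data
 *             ... use bo as a blit source, then kgem_bo_destroy(kgem, bo);
 *     }
 */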

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

#if HAS_DEBUG_FULL
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
        (void)kgem;
        (void)nbatch;
}
#endif

#endif /* KGEM_H */