Go to most recent revision | Details | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
5564 | serge | 1 | /************************************************************************** |
2 | * |
||
3 | * Copyright 2007 VMware, Inc. |
||
4 | * All Rights Reserved. |
||
5 | * |
||
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
7 | * copy of this software and associated documentation files (the |
||
8 | * "Software"), to deal in the Software without restriction, including |
||
9 | * without limitation the rights to use, copy, modify, merge, publish, |
||
10 | * distribute, sub license, and/or sell copies of the Software, and to |
||
11 | * permit persons to whom the Software is furnished to do so, subject to |
||
12 | * the following conditions: |
||
13 | * |
||
14 | * The above copyright notice and this permission notice (including the |
||
15 | * next paragraph) shall be included in all copies or substantial portions |
||
16 | * of the Software. |
||
17 | * |
||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
||
19 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
||
21 | * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR |
||
22 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
||
23 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
||
24 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
||
25 | * |
||
26 | **************************************************************************/ |
||
27 | |||
28 | /** |
||
29 | * @file |
||
30 | * |
||
31 | * Wrap the cso cache & hash mechanisms in a simplified |
||
32 | * pipe-driver-specific interface. |
||
33 | * |
||
34 | * @author Zack Rusin |
||
35 | * @author Keith Whitwell |
||
36 | */ |
||
37 | |||
38 | #include "pipe/p_state.h" |
||
39 | #include "util/u_draw.h" |
||
40 | #include "util/u_framebuffer.h" |
||
41 | #include "util/u_inlines.h" |
||
42 | #include "util/u_math.h" |
||
43 | #include "util/u_memory.h" |
||
44 | #include "util/u_vbuf.h" |
||
45 | #include "tgsi/tgsi_parse.h" |
||
46 | |||
47 | #include "cso_cache/cso_context.h" |
||
48 | #include "cso_cache/cso_cache.h" |
||
49 | #include "cso_cache/cso_hash.h" |
||
50 | #include "cso_context.h" |
||
51 | |||
52 | |||
/**
 * Info related to samplers and sampler views.
 * We have one of these for fragment samplers and another for vertex samplers.
 */
struct sampler_info
{
   /* NOTE(review): "hw" presumably mirrors what is currently bound on the
    * driver, as opposed to the state requested below — confirm against the
    * sampler setters, which live outside this chunk. */
   struct {
      void *samplers[PIPE_MAX_SAMPLERS];
      unsigned nr_samplers;
   } hw;

   /* Sampler CSO handles requested by the state tracker. */
   void *samplers[PIPE_MAX_SAMPLERS];
   unsigned nr_samplers;

   /* 1-deep save stack for the sampler handles. */
   void *samplers_saved[PIPE_MAX_SAMPLERS];
   unsigned nr_samplers_saved;

   /* Currently bound sampler views.  These are owned references: they are
    * released in cso_destroy_context(). */
   struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_views;

   /* 1-deep save stack for sampler views (also owned references). */
   struct pipe_sampler_view *views_saved[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   unsigned nr_views_saved;
};
||
76 | |||
77 | |||
78 | |||
/**
 * The CSO context: wraps a pipe_context and caches created driver state
 * objects, tracking what is currently bound so redundant binds are skipped.
 * Most pieces of state also have a 1-deep "saved" slot used by the
 * cso_save_*/cso_restore_* pairs.
 */
struct cso_context {
   struct pipe_context *pipe;
   struct cso_cache *cache;      /* hash cache of created driver CSOs */
   struct u_vbuf *vbuf;          /* optional vertex-buffer fallback module */

   /* Capabilities queried from the screen at creation time. */
   boolean has_geometry_shader;
   boolean has_tessellation;
   boolean has_streamout;

   struct sampler_info samplers[PIPE_SHADER_TYPES];

   /* Auxiliary vertex buffer slot (see cso_create_context: index 0 for now). */
   struct pipe_vertex_buffer aux_vertex_buffer_current;
   struct pipe_vertex_buffer aux_vertex_buffer_saved;
   unsigned aux_vertex_buffer_index;

   struct pipe_constant_buffer aux_constbuf_current[PIPE_SHADER_TYPES];
   struct pipe_constant_buffer aux_constbuf_saved[PIPE_SHADER_TYPES];

   /* Stream output targets (owned references, released at destroy). */
   unsigned nr_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];

   unsigned nr_so_targets_saved;
   struct pipe_stream_output_target *so_targets_saved[PIPE_MAX_SO_BUFFERS];

   /** Current and saved state.
    * The saved state is used as a 1-deep stack.
    */
   void *blend, *blend_saved;
   void *depth_stencil, *depth_stencil_saved;
   void *rasterizer, *rasterizer_saved;
   void *fragment_shader, *fragment_shader_saved;
   void *vertex_shader, *vertex_shader_saved;
   void *geometry_shader, *geometry_shader_saved;
   void *tessctrl_shader, *tessctrl_shader_saved;
   void *tesseval_shader, *tesseval_shader_saved;
   void *velements, *velements_saved;
   struct pipe_query *render_condition, *render_condition_saved;
   uint render_condition_mode, render_condition_mode_saved;
   boolean render_condition_cond, render_condition_cond_saved;

   struct pipe_clip_state clip;
   struct pipe_clip_state clip_saved;

   struct pipe_framebuffer_state fb, fb_saved;
   struct pipe_viewport_state vp, vp_saved;
   struct pipe_blend_color blend_color;
   unsigned sample_mask, sample_mask_saved;
   unsigned min_samples, min_samples_saved;
   struct pipe_stencil_ref stencil_ref, stencil_ref_saved;
};
||
129 | |||
130 | |||
131 | static boolean delete_blend_state(struct cso_context *ctx, void *state) |
||
132 | { |
||
133 | struct cso_blend *cso = (struct cso_blend *)state; |
||
134 | |||
135 | if (ctx->blend == cso->data) |
||
136 | return FALSE; |
||
137 | |||
138 | if (cso->delete_state) |
||
139 | cso->delete_state(cso->context, cso->data); |
||
140 | FREE(state); |
||
141 | return TRUE; |
||
142 | } |
||
143 | |||
144 | static boolean delete_depth_stencil_state(struct cso_context *ctx, void *state) |
||
145 | { |
||
146 | struct cso_depth_stencil_alpha *cso = |
||
147 | (struct cso_depth_stencil_alpha *)state; |
||
148 | |||
149 | if (ctx->depth_stencil == cso->data) |
||
150 | return FALSE; |
||
151 | |||
152 | if (cso->delete_state) |
||
153 | cso->delete_state(cso->context, cso->data); |
||
154 | FREE(state); |
||
155 | |||
156 | return TRUE; |
||
157 | } |
||
158 | |||
159 | static boolean delete_sampler_state(struct cso_context *ctx, void *state) |
||
160 | { |
||
161 | struct cso_sampler *cso = (struct cso_sampler *)state; |
||
162 | if (cso->delete_state) |
||
163 | cso->delete_state(cso->context, cso->data); |
||
164 | FREE(state); |
||
165 | return TRUE; |
||
166 | } |
||
167 | |||
168 | static boolean delete_rasterizer_state(struct cso_context *ctx, void *state) |
||
169 | { |
||
170 | struct cso_rasterizer *cso = (struct cso_rasterizer *)state; |
||
171 | |||
172 | if (ctx->rasterizer == cso->data) |
||
173 | return FALSE; |
||
174 | if (cso->delete_state) |
||
175 | cso->delete_state(cso->context, cso->data); |
||
176 | FREE(state); |
||
177 | return TRUE; |
||
178 | } |
||
179 | |||
180 | static boolean delete_vertex_elements(struct cso_context *ctx, |
||
181 | void *state) |
||
182 | { |
||
183 | struct cso_velements *cso = (struct cso_velements *)state; |
||
184 | |||
185 | if (ctx->velements == cso->data) |
||
186 | return FALSE; |
||
187 | |||
188 | if (cso->delete_state) |
||
189 | cso->delete_state(cso->context, cso->data); |
||
190 | FREE(state); |
||
191 | return TRUE; |
||
192 | } |
||
193 | |||
194 | |||
195 | static INLINE boolean delete_cso(struct cso_context *ctx, |
||
196 | void *state, enum cso_cache_type type) |
||
197 | { |
||
198 | switch (type) { |
||
199 | case CSO_BLEND: |
||
200 | return delete_blend_state(ctx, state); |
||
201 | case CSO_SAMPLER: |
||
202 | return delete_sampler_state(ctx, state); |
||
203 | case CSO_DEPTH_STENCIL_ALPHA: |
||
204 | return delete_depth_stencil_state(ctx, state); |
||
205 | case CSO_RASTERIZER: |
||
206 | return delete_rasterizer_state(ctx, state); |
||
207 | case CSO_VELEMENTS: |
||
208 | return delete_vertex_elements(ctx, state); |
||
209 | default: |
||
210 | assert(0); |
||
211 | FREE(state); |
||
212 | } |
||
213 | return FALSE; |
||
214 | } |
||
215 | |||
/**
 * Cache "sanitize" callback: evict entries when a hash grows too large.
 *
 * \param hash       the per-type hash to shrink
 * \param type       which kind of CSO the hash holds
 * \param max_size   target maximum number of entries
 * \param user_data  the owning cso_context (passed through by the cache)
 */
static INLINE void
sanitize_hash(struct cso_hash *hash, enum cso_cache_type type,
              int max_size, void *user_data)
{
   struct cso_context *ctx = (struct cso_context *)user_data;
   /* If we are approaching the maximum size, remove a fourth of the entries;
    * otherwise every subsequent call would go through the same eviction. */
   int hash_size = cso_hash_size(hash);
   int max_entries = (max_size > hash_size) ? max_size : hash_size;
   int to_remove =  (max_size < max_entries) * max_entries/4;
   struct cso_hash_iter iter = cso_hash_first_node(hash);
   if (hash_size > max_size)
      to_remove += hash_size - max_size;
   while (to_remove) {
      /* remove elements until we're good */
      /* fixme: currently we pick the nodes to remove at random */
      void *cso = cso_hash_iter_data(iter);
      /* delete_cso() refuses (returns FALSE) for the currently-bound object;
       * in that case just step past it. */
      if (delete_cso(ctx, cso, type)) {
         iter = cso_hash_erase(hash, iter);
         --to_remove;
      } else
         iter = cso_hash_iter_next(iter);
   }
}
||
240 | |||
241 | static void cso_init_vbuf(struct cso_context *cso) |
||
242 | { |
||
243 | struct u_vbuf_caps caps; |
||
244 | |||
245 | /* Install u_vbuf if there is anything unsupported. */ |
||
246 | if (u_vbuf_get_caps(cso->pipe->screen, &caps)) { |
||
247 | cso->vbuf = u_vbuf_create(cso->pipe, &caps, |
||
248 | cso->aux_vertex_buffer_index); |
||
249 | } |
||
250 | } |
||
251 | |||
252 | struct cso_context *cso_create_context( struct pipe_context *pipe ) |
||
253 | { |
||
254 | struct cso_context *ctx = CALLOC_STRUCT(cso_context); |
||
255 | if (ctx == NULL) |
||
256 | goto out; |
||
257 | |||
258 | ctx->cache = cso_cache_create(); |
||
259 | if (ctx->cache == NULL) |
||
260 | goto out; |
||
261 | cso_cache_set_sanitize_callback(ctx->cache, |
||
262 | sanitize_hash, |
||
263 | ctx); |
||
264 | |||
265 | ctx->pipe = pipe; |
||
266 | ctx->sample_mask = ~0; |
||
267 | |||
268 | ctx->aux_vertex_buffer_index = 0; /* 0 for now */ |
||
269 | |||
270 | cso_init_vbuf(ctx); |
||
271 | |||
272 | /* Enable for testing: */ |
||
273 | if (0) cso_set_maximum_cache_size( ctx->cache, 4 ); |
||
274 | |||
275 | if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_GEOMETRY, |
||
276 | PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) { |
||
277 | ctx->has_geometry_shader = TRUE; |
||
278 | } |
||
279 | if (pipe->screen->get_shader_param(pipe->screen, PIPE_SHADER_TESS_CTRL, |
||
280 | PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0) { |
||
281 | ctx->has_tessellation = TRUE; |
||
282 | } |
||
283 | if (pipe->screen->get_param(pipe->screen, |
||
284 | PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS) != 0) { |
||
285 | ctx->has_streamout = TRUE; |
||
286 | } |
||
287 | |||
288 | return ctx; |
||
289 | |||
290 | out: |
||
291 | cso_destroy_context( ctx ); |
||
292 | return NULL; |
||
293 | } |
||
294 | |||
295 | /** |
||
296 | * Free the CSO context. |
||
297 | */ |
||
298 | void cso_destroy_context( struct cso_context *ctx ) |
||
299 | { |
||
300 | unsigned i, shader; |
||
301 | |||
302 | if (ctx->pipe) { |
||
303 | ctx->pipe->set_index_buffer(ctx->pipe, NULL); |
||
304 | |||
305 | ctx->pipe->bind_blend_state( ctx->pipe, NULL ); |
||
306 | ctx->pipe->bind_rasterizer_state( ctx->pipe, NULL ); |
||
307 | |||
308 | { |
||
309 | static struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS] = { NULL }; |
||
310 | static void *zeros[PIPE_MAX_SAMPLERS] = { NULL }; |
||
311 | struct pipe_screen *scr = ctx->pipe->screen; |
||
312 | unsigned sh; |
||
313 | for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) { |
||
314 | int maxsam = scr->get_shader_param(scr, sh, |
||
315 | PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS); |
||
316 | int maxview = scr->get_shader_param(scr, sh, |
||
317 | PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS); |
||
318 | assert(maxsam <= PIPE_MAX_SAMPLERS); |
||
319 | assert(maxview <= PIPE_MAX_SHADER_SAMPLER_VIEWS); |
||
320 | if (maxsam > 0) { |
||
321 | ctx->pipe->bind_sampler_states(ctx->pipe, sh, 0, maxsam, zeros); |
||
322 | } |
||
323 | if (maxview > 0) { |
||
324 | ctx->pipe->set_sampler_views(ctx->pipe, sh, 0, maxview, views); |
||
325 | } |
||
326 | } |
||
327 | } |
||
328 | |||
329 | ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL ); |
||
330 | ctx->pipe->bind_fs_state( ctx->pipe, NULL ); |
||
331 | ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, NULL); |
||
332 | ctx->pipe->bind_vs_state( ctx->pipe, NULL ); |
||
333 | ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, NULL); |
||
334 | if (ctx->has_geometry_shader) { |
||
335 | ctx->pipe->bind_gs_state(ctx->pipe, NULL); |
||
336 | ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_GEOMETRY, 0, NULL); |
||
337 | } |
||
338 | if (ctx->has_tessellation) { |
||
339 | ctx->pipe->bind_tcs_state(ctx->pipe, NULL); |
||
340 | ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_CTRL, 0, NULL); |
||
341 | ctx->pipe->bind_tes_state(ctx->pipe, NULL); |
||
342 | ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_TESS_EVAL, 0, NULL); |
||
343 | } |
||
344 | ctx->pipe->bind_vertex_elements_state( ctx->pipe, NULL ); |
||
345 | |||
346 | if (ctx->has_streamout) |
||
347 | ctx->pipe->set_stream_output_targets(ctx->pipe, 0, NULL, NULL); |
||
348 | } |
||
349 | |||
350 | /* free sampler views for each shader stage */ |
||
351 | for (shader = 0; shader < Elements(ctx->samplers); shader++) { |
||
352 | struct sampler_info *info = &ctx->samplers[shader]; |
||
353 | for (i = 0; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; i++) { |
||
354 | pipe_sampler_view_reference(&info->views[i], NULL); |
||
355 | pipe_sampler_view_reference(&info->views_saved[i], NULL); |
||
356 | } |
||
357 | } |
||
358 | |||
359 | util_unreference_framebuffer_state(&ctx->fb); |
||
360 | util_unreference_framebuffer_state(&ctx->fb_saved); |
||
361 | |||
362 | pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer, NULL); |
||
363 | pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL); |
||
364 | |||
365 | for (i = 0; i < PIPE_SHADER_TYPES; i++) { |
||
366 | pipe_resource_reference(&ctx->aux_constbuf_current[i].buffer, NULL); |
||
367 | pipe_resource_reference(&ctx->aux_constbuf_saved[i].buffer, NULL); |
||
368 | } |
||
369 | |||
370 | for (i = 0; i < PIPE_MAX_SO_BUFFERS; i++) { |
||
371 | pipe_so_target_reference(&ctx->so_targets[i], NULL); |
||
372 | pipe_so_target_reference(&ctx->so_targets_saved[i], NULL); |
||
373 | } |
||
374 | |||
375 | if (ctx->cache) { |
||
376 | cso_cache_delete( ctx->cache ); |
||
377 | ctx->cache = NULL; |
||
378 | } |
||
379 | |||
380 | if (ctx->vbuf) |
||
381 | u_vbuf_destroy(ctx->vbuf); |
||
382 | FREE( ctx ); |
||
383 | } |
||
384 | |||
385 | |||
/* These functions will either find the state matching the given template
 * in the cache, or they will create a new state from the template,
 * insert it in the cache and return it.
 */
||
390 | |||
/*
 * If the driver returns 0 from the create method, the cso's data member
 * is set to point at the template itself.
 */
||
395 | |||
396 | enum pipe_error cso_set_blend(struct cso_context *ctx, |
||
397 | const struct pipe_blend_state *templ) |
||
398 | { |
||
399 | unsigned key_size, hash_key; |
||
400 | struct cso_hash_iter iter; |
||
401 | void *handle; |
||
402 | |||
403 | key_size = templ->independent_blend_enable ? |
||
404 | sizeof(struct pipe_blend_state) : |
||
405 | (char *)&(templ->rt[1]) - (char *)templ; |
||
406 | hash_key = cso_construct_key((void*)templ, key_size); |
||
407 | iter = cso_find_state_template(ctx->cache, hash_key, CSO_BLEND, |
||
408 | (void*)templ, key_size); |
||
409 | |||
410 | if (cso_hash_iter_is_null(iter)) { |
||
411 | struct cso_blend *cso = MALLOC(sizeof(struct cso_blend)); |
||
412 | if (!cso) |
||
413 | return PIPE_ERROR_OUT_OF_MEMORY; |
||
414 | |||
415 | memset(&cso->state, 0, sizeof cso->state); |
||
416 | memcpy(&cso->state, templ, key_size); |
||
417 | cso->data = ctx->pipe->create_blend_state(ctx->pipe, &cso->state); |
||
418 | cso->delete_state = (cso_state_callback)ctx->pipe->delete_blend_state; |
||
419 | cso->context = ctx->pipe; |
||
420 | |||
421 | iter = cso_insert_state(ctx->cache, hash_key, CSO_BLEND, cso); |
||
422 | if (cso_hash_iter_is_null(iter)) { |
||
423 | FREE(cso); |
||
424 | return PIPE_ERROR_OUT_OF_MEMORY; |
||
425 | } |
||
426 | |||
427 | handle = cso->data; |
||
428 | } |
||
429 | else { |
||
430 | handle = ((struct cso_blend *)cso_hash_iter_data(iter))->data; |
||
431 | } |
||
432 | |||
433 | if (ctx->blend != handle) { |
||
434 | ctx->blend = handle; |
||
435 | ctx->pipe->bind_blend_state(ctx->pipe, handle); |
||
436 | } |
||
437 | return PIPE_OK; |
||
438 | } |
||
439 | |||
440 | void cso_save_blend(struct cso_context *ctx) |
||
441 | { |
||
442 | assert(!ctx->blend_saved); |
||
443 | ctx->blend_saved = ctx->blend; |
||
444 | } |
||
445 | |||
446 | void cso_restore_blend(struct cso_context *ctx) |
||
447 | { |
||
448 | if (ctx->blend != ctx->blend_saved) { |
||
449 | ctx->blend = ctx->blend_saved; |
||
450 | ctx->pipe->bind_blend_state(ctx->pipe, ctx->blend_saved); |
||
451 | } |
||
452 | ctx->blend_saved = NULL; |
||
453 | } |
||
454 | |||
455 | |||
456 | |||
457 | enum pipe_error |
||
458 | cso_set_depth_stencil_alpha(struct cso_context *ctx, |
||
459 | const struct pipe_depth_stencil_alpha_state *templ) |
||
460 | { |
||
461 | unsigned key_size = sizeof(struct pipe_depth_stencil_alpha_state); |
||
462 | unsigned hash_key = cso_construct_key((void*)templ, key_size); |
||
463 | struct cso_hash_iter iter = cso_find_state_template(ctx->cache, |
||
464 | hash_key, |
||
465 | CSO_DEPTH_STENCIL_ALPHA, |
||
466 | (void*)templ, key_size); |
||
467 | void *handle; |
||
468 | |||
469 | if (cso_hash_iter_is_null(iter)) { |
||
470 | struct cso_depth_stencil_alpha *cso = |
||
471 | MALLOC(sizeof(struct cso_depth_stencil_alpha)); |
||
472 | if (!cso) |
||
473 | return PIPE_ERROR_OUT_OF_MEMORY; |
||
474 | |||
475 | memcpy(&cso->state, templ, sizeof(*templ)); |
||
476 | cso->data = ctx->pipe->create_depth_stencil_alpha_state(ctx->pipe, |
||
477 | &cso->state); |
||
478 | cso->delete_state = |
||
479 | (cso_state_callback)ctx->pipe->delete_depth_stencil_alpha_state; |
||
480 | cso->context = ctx->pipe; |
||
481 | |||
482 | iter = cso_insert_state(ctx->cache, hash_key, |
||
483 | CSO_DEPTH_STENCIL_ALPHA, cso); |
||
484 | if (cso_hash_iter_is_null(iter)) { |
||
485 | FREE(cso); |
||
486 | return PIPE_ERROR_OUT_OF_MEMORY; |
||
487 | } |
||
488 | |||
489 | handle = cso->data; |
||
490 | } |
||
491 | else { |
||
492 | handle = ((struct cso_depth_stencil_alpha *) |
||
493 | cso_hash_iter_data(iter))->data; |
||
494 | } |
||
495 | |||
496 | if (ctx->depth_stencil != handle) { |
||
497 | ctx->depth_stencil = handle; |
||
498 | ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, handle); |
||
499 | } |
||
500 | return PIPE_OK; |
||
501 | } |
||
502 | |||
503 | void cso_save_depth_stencil_alpha(struct cso_context *ctx) |
||
504 | { |
||
505 | assert(!ctx->depth_stencil_saved); |
||
506 | ctx->depth_stencil_saved = ctx->depth_stencil; |
||
507 | } |
||
508 | |||
509 | void cso_restore_depth_stencil_alpha(struct cso_context *ctx) |
||
510 | { |
||
511 | if (ctx->depth_stencil != ctx->depth_stencil_saved) { |
||
512 | ctx->depth_stencil = ctx->depth_stencil_saved; |
||
513 | ctx->pipe->bind_depth_stencil_alpha_state(ctx->pipe, |
||
514 | ctx->depth_stencil_saved); |
||
515 | } |
||
516 | ctx->depth_stencil_saved = NULL; |
||
517 | } |
||
518 | |||
519 | |||
520 | |||
521 | enum pipe_error cso_set_rasterizer(struct cso_context *ctx, |
||
522 | const struct pipe_rasterizer_state *templ) |
||
523 | { |
||
524 | unsigned key_size = sizeof(struct pipe_rasterizer_state); |
||
525 | unsigned hash_key = cso_construct_key((void*)templ, key_size); |
||
526 | struct cso_hash_iter iter = cso_find_state_template(ctx->cache, |
||
527 | hash_key, |
||
528 | CSO_RASTERIZER, |
||
529 | (void*)templ, key_size); |
||
530 | void *handle = NULL; |
||
531 | |||
532 | if (cso_hash_iter_is_null(iter)) { |
||
533 | struct cso_rasterizer *cso = MALLOC(sizeof(struct cso_rasterizer)); |
||
534 | if (!cso) |
||
535 | return PIPE_ERROR_OUT_OF_MEMORY; |
||
536 | |||
537 | memcpy(&cso->state, templ, sizeof(*templ)); |
||
538 | cso->data = ctx->pipe->create_rasterizer_state(ctx->pipe, &cso->state); |
||
539 | cso->delete_state = |
||
540 | (cso_state_callback)ctx->pipe->delete_rasterizer_state; |
||
541 | cso->context = ctx->pipe; |
||
542 | |||
543 | iter = cso_insert_state(ctx->cache, hash_key, CSO_RASTERIZER, cso); |
||
544 | if (cso_hash_iter_is_null(iter)) { |
||
545 | FREE(cso); |
||
546 | return PIPE_ERROR_OUT_OF_MEMORY; |
||
547 | } |
||
548 | |||
549 | handle = cso->data; |
||
550 | } |
||
551 | else { |
||
552 | handle = ((struct cso_rasterizer *)cso_hash_iter_data(iter))->data; |
||
553 | } |
||
554 | |||
555 | if (ctx->rasterizer != handle) { |
||
556 | ctx->rasterizer = handle; |
||
557 | ctx->pipe->bind_rasterizer_state(ctx->pipe, handle); |
||
558 | } |
||
559 | return PIPE_OK; |
||
560 | } |
||
561 | |||
562 | void cso_save_rasterizer(struct cso_context *ctx) |
||
563 | { |
||
564 | assert(!ctx->rasterizer_saved); |
||
565 | ctx->rasterizer_saved = ctx->rasterizer; |
||
566 | } |
||
567 | |||
568 | void cso_restore_rasterizer(struct cso_context *ctx) |
||
569 | { |
||
570 | if (ctx->rasterizer != ctx->rasterizer_saved) { |
||
571 | ctx->rasterizer = ctx->rasterizer_saved; |
||
572 | ctx->pipe->bind_rasterizer_state(ctx->pipe, ctx->rasterizer_saved); |
||
573 | } |
||
574 | ctx->rasterizer_saved = NULL; |
||
575 | } |
||
576 | |||
577 | |||
578 | void cso_set_fragment_shader_handle(struct cso_context *ctx, void *handle ) |
||
579 | { |
||
580 | if (ctx->fragment_shader != handle) { |
||
581 | ctx->fragment_shader = handle; |
||
582 | ctx->pipe->bind_fs_state(ctx->pipe, handle); |
||
583 | } |
||
584 | } |
||
585 | |||
586 | void cso_delete_fragment_shader(struct cso_context *ctx, void *handle ) |
||
587 | { |
||
588 | if (handle == ctx->fragment_shader) { |
||
589 | /* unbind before deleting */ |
||
590 | ctx->pipe->bind_fs_state(ctx->pipe, NULL); |
||
591 | ctx->fragment_shader = NULL; |
||
592 | } |
||
593 | ctx->pipe->delete_fs_state(ctx->pipe, handle); |
||
594 | } |
||
595 | |||
596 | void cso_save_fragment_shader(struct cso_context *ctx) |
||
597 | { |
||
598 | assert(!ctx->fragment_shader_saved); |
||
599 | ctx->fragment_shader_saved = ctx->fragment_shader; |
||
600 | } |
||
601 | |||
602 | void cso_restore_fragment_shader(struct cso_context *ctx) |
||
603 | { |
||
604 | if (ctx->fragment_shader_saved != ctx->fragment_shader) { |
||
605 | ctx->pipe->bind_fs_state(ctx->pipe, ctx->fragment_shader_saved); |
||
606 | ctx->fragment_shader = ctx->fragment_shader_saved; |
||
607 | } |
||
608 | ctx->fragment_shader_saved = NULL; |
||
609 | } |
||
610 | |||
611 | |||
612 | void cso_set_vertex_shader_handle(struct cso_context *ctx, void *handle) |
||
613 | { |
||
614 | if (ctx->vertex_shader != handle) { |
||
615 | ctx->vertex_shader = handle; |
||
616 | ctx->pipe->bind_vs_state(ctx->pipe, handle); |
||
617 | } |
||
618 | } |
||
619 | |||
620 | void cso_delete_vertex_shader(struct cso_context *ctx, void *handle ) |
||
621 | { |
||
622 | if (handle == ctx->vertex_shader) { |
||
623 | /* unbind before deleting */ |
||
624 | ctx->pipe->bind_vs_state(ctx->pipe, NULL); |
||
625 | ctx->vertex_shader = NULL; |
||
626 | } |
||
627 | ctx->pipe->delete_vs_state(ctx->pipe, handle); |
||
628 | } |
||
629 | |||
630 | void cso_save_vertex_shader(struct cso_context *ctx) |
||
631 | { |
||
632 | assert(!ctx->vertex_shader_saved); |
||
633 | ctx->vertex_shader_saved = ctx->vertex_shader; |
||
634 | } |
||
635 | |||
636 | void cso_restore_vertex_shader(struct cso_context *ctx) |
||
637 | { |
||
638 | if (ctx->vertex_shader_saved != ctx->vertex_shader) { |
||
639 | ctx->pipe->bind_vs_state(ctx->pipe, ctx->vertex_shader_saved); |
||
640 | ctx->vertex_shader = ctx->vertex_shader_saved; |
||
641 | } |
||
642 | ctx->vertex_shader_saved = NULL; |
||
643 | } |
||
644 | |||
645 | |||
646 | void cso_set_framebuffer(struct cso_context *ctx, |
||
647 | const struct pipe_framebuffer_state *fb) |
||
648 | { |
||
649 | if (memcmp(&ctx->fb, fb, sizeof(*fb)) != 0) { |
||
650 | util_copy_framebuffer_state(&ctx->fb, fb); |
||
651 | ctx->pipe->set_framebuffer_state(ctx->pipe, fb); |
||
652 | } |
||
653 | } |
||
654 | |||
655 | void cso_save_framebuffer(struct cso_context *ctx) |
||
656 | { |
||
657 | util_copy_framebuffer_state(&ctx->fb_saved, &ctx->fb); |
||
658 | } |
||
659 | |||
660 | void cso_restore_framebuffer(struct cso_context *ctx) |
||
661 | { |
||
662 | if (memcmp(&ctx->fb, &ctx->fb_saved, sizeof(ctx->fb))) { |
||
663 | util_copy_framebuffer_state(&ctx->fb, &ctx->fb_saved); |
||
664 | ctx->pipe->set_framebuffer_state(ctx->pipe, &ctx->fb); |
||
665 | util_unreference_framebuffer_state(&ctx->fb_saved); |
||
666 | } |
||
667 | } |
||
668 | |||
669 | |||
670 | void cso_set_viewport(struct cso_context *ctx, |
||
671 | const struct pipe_viewport_state *vp) |
||
672 | { |
||
673 | if (memcmp(&ctx->vp, vp, sizeof(*vp))) { |
||
674 | ctx->vp = *vp; |
||
675 | ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, vp); |
||
676 | } |
||
677 | } |
||
678 | |||
679 | void cso_save_viewport(struct cso_context *ctx) |
||
680 | { |
||
681 | ctx->vp_saved = ctx->vp; |
||
682 | } |
||
683 | |||
684 | |||
685 | void cso_restore_viewport(struct cso_context *ctx) |
||
686 | { |
||
687 | if (memcmp(&ctx->vp, &ctx->vp_saved, sizeof(ctx->vp))) { |
||
688 | ctx->vp = ctx->vp_saved; |
||
689 | ctx->pipe->set_viewport_states(ctx->pipe, 0, 1, &ctx->vp); |
||
690 | } |
||
691 | } |
||
692 | |||
693 | |||
694 | void cso_set_blend_color(struct cso_context *ctx, |
||
695 | const struct pipe_blend_color *bc) |
||
696 | { |
||
697 | if (memcmp(&ctx->blend_color, bc, sizeof(ctx->blend_color))) { |
||
698 | ctx->blend_color = *bc; |
||
699 | ctx->pipe->set_blend_color(ctx->pipe, bc); |
||
700 | } |
||
701 | } |
||
702 | |||
703 | void cso_set_sample_mask(struct cso_context *ctx, unsigned sample_mask) |
||
704 | { |
||
705 | if (ctx->sample_mask != sample_mask) { |
||
706 | ctx->sample_mask = sample_mask; |
||
707 | ctx->pipe->set_sample_mask(ctx->pipe, sample_mask); |
||
708 | } |
||
709 | } |
||
710 | |||
711 | void cso_save_sample_mask(struct cso_context *ctx) |
||
712 | { |
||
713 | ctx->sample_mask_saved = ctx->sample_mask; |
||
714 | } |
||
715 | |||
716 | void cso_restore_sample_mask(struct cso_context *ctx) |
||
717 | { |
||
718 | cso_set_sample_mask(ctx, ctx->sample_mask_saved); |
||
719 | } |
||
720 | |||
721 | void cso_set_min_samples(struct cso_context *ctx, unsigned min_samples) |
||
722 | { |
||
723 | if (ctx->min_samples != min_samples && ctx->pipe->set_min_samples) { |
||
724 | ctx->min_samples = min_samples; |
||
725 | ctx->pipe->set_min_samples(ctx->pipe, min_samples); |
||
726 | } |
||
727 | } |
||
728 | |||
729 | void cso_save_min_samples(struct cso_context *ctx) |
||
730 | { |
||
731 | ctx->min_samples_saved = ctx->min_samples; |
||
732 | } |
||
733 | |||
734 | void cso_restore_min_samples(struct cso_context *ctx) |
||
735 | { |
||
736 | cso_set_min_samples(ctx, ctx->min_samples_saved); |
||
737 | } |
||
738 | |||
739 | void cso_set_stencil_ref(struct cso_context *ctx, |
||
740 | const struct pipe_stencil_ref *sr) |
||
741 | { |
||
742 | if (memcmp(&ctx->stencil_ref, sr, sizeof(ctx->stencil_ref))) { |
||
743 | ctx->stencil_ref = *sr; |
||
744 | ctx->pipe->set_stencil_ref(ctx->pipe, sr); |
||
745 | } |
||
746 | } |
||
747 | |||
748 | void cso_save_stencil_ref(struct cso_context *ctx) |
||
749 | { |
||
750 | ctx->stencil_ref_saved = ctx->stencil_ref; |
||
751 | } |
||
752 | |||
753 | |||
754 | void cso_restore_stencil_ref(struct cso_context *ctx) |
||
755 | { |
||
756 | if (memcmp(&ctx->stencil_ref, &ctx->stencil_ref_saved, |
||
757 | sizeof(ctx->stencil_ref))) { |
||
758 | ctx->stencil_ref = ctx->stencil_ref_saved; |
||
759 | ctx->pipe->set_stencil_ref(ctx->pipe, &ctx->stencil_ref); |
||
760 | } |
||
761 | } |
||
762 | |||
763 | void cso_set_render_condition(struct cso_context *ctx, |
||
764 | struct pipe_query *query, |
||
765 | boolean condition, uint mode) |
||
766 | { |
||
767 | struct pipe_context *pipe = ctx->pipe; |
||
768 | |||
769 | if (ctx->render_condition != query || |
||
770 | ctx->render_condition_mode != mode || |
||
771 | ctx->render_condition_cond != condition) { |
||
772 | pipe->render_condition(pipe, query, condition, mode); |
||
773 | ctx->render_condition = query; |
||
774 | ctx->render_condition_cond = condition; |
||
775 | ctx->render_condition_mode = mode; |
||
776 | } |
||
777 | } |
||
778 | |||
779 | void cso_save_render_condition(struct cso_context *ctx) |
||
780 | { |
||
781 | ctx->render_condition_saved = ctx->render_condition; |
||
782 | ctx->render_condition_cond_saved = ctx->render_condition_cond; |
||
783 | ctx->render_condition_mode_saved = ctx->render_condition_mode; |
||
784 | } |
||
785 | |||
786 | void cso_restore_render_condition(struct cso_context *ctx) |
||
787 | { |
||
788 | cso_set_render_condition(ctx, ctx->render_condition_saved, |
||
789 | ctx->render_condition_cond_saved, |
||
790 | ctx->render_condition_mode_saved); |
||
791 | } |
||
792 | |||
793 | void cso_set_geometry_shader_handle(struct cso_context *ctx, void *handle) |
||
794 | { |
||
795 | assert(ctx->has_geometry_shader || !handle); |
||
796 | |||
797 | if (ctx->has_geometry_shader && ctx->geometry_shader != handle) { |
||
798 | ctx->geometry_shader = handle; |
||
799 | ctx->pipe->bind_gs_state(ctx->pipe, handle); |
||
800 | } |
||
801 | } |
||
802 | |||
803 | void cso_delete_geometry_shader(struct cso_context *ctx, void *handle) |
||
804 | { |
||
805 | if (handle == ctx->geometry_shader) { |
||
806 | /* unbind before deleting */ |
||
807 | ctx->pipe->bind_gs_state(ctx->pipe, NULL); |
||
808 | ctx->geometry_shader = NULL; |
||
809 | } |
||
810 | ctx->pipe->delete_gs_state(ctx->pipe, handle); |
||
811 | } |
||
812 | |||
813 | void cso_save_geometry_shader(struct cso_context *ctx) |
||
814 | { |
||
815 | if (!ctx->has_geometry_shader) { |
||
816 | return; |
||
817 | } |
||
818 | |||
819 | assert(!ctx->geometry_shader_saved); |
||
820 | ctx->geometry_shader_saved = ctx->geometry_shader; |
||
821 | } |
||
822 | |||
823 | void cso_restore_geometry_shader(struct cso_context *ctx) |
||
824 | { |
||
825 | if (!ctx->has_geometry_shader) { |
||
826 | return; |
||
827 | } |
||
828 | |||
829 | if (ctx->geometry_shader_saved != ctx->geometry_shader) { |
||
830 | ctx->pipe->bind_gs_state(ctx->pipe, ctx->geometry_shader_saved); |
||
831 | ctx->geometry_shader = ctx->geometry_shader_saved; |
||
832 | } |
||
833 | ctx->geometry_shader_saved = NULL; |
||
834 | } |
||
835 | |||
836 | void cso_set_tessctrl_shader_handle(struct cso_context *ctx, void *handle) |
||
837 | { |
||
838 | assert(ctx->has_tessellation || !handle); |
||
839 | |||
840 | if (ctx->has_tessellation && ctx->tessctrl_shader != handle) { |
||
841 | ctx->tessctrl_shader = handle; |
||
842 | ctx->pipe->bind_tcs_state(ctx->pipe, handle); |
||
843 | } |
||
844 | } |
||
845 | |||
846 | void cso_delete_tessctrl_shader(struct cso_context *ctx, void *handle) |
||
847 | { |
||
848 | if (handle == ctx->tessctrl_shader) { |
||
849 | /* unbind before deleting */ |
||
850 | ctx->pipe->bind_tcs_state(ctx->pipe, NULL); |
||
851 | ctx->tessctrl_shader = NULL; |
||
852 | } |
||
853 | ctx->pipe->delete_tcs_state(ctx->pipe, handle); |
||
854 | } |
||
855 | |||
856 | void cso_save_tessctrl_shader(struct cso_context *ctx) |
||
857 | { |
||
858 | if (!ctx->has_tessellation) { |
||
859 | return; |
||
860 | } |
||
861 | |||
862 | assert(!ctx->tessctrl_shader_saved); |
||
863 | ctx->tessctrl_shader_saved = ctx->tessctrl_shader; |
||
864 | } |
||
865 | |||
866 | void cso_restore_tessctrl_shader(struct cso_context *ctx) |
||
867 | { |
||
868 | if (!ctx->has_tessellation) { |
||
869 | return; |
||
870 | } |
||
871 | |||
872 | if (ctx->tessctrl_shader_saved != ctx->tessctrl_shader) { |
||
873 | ctx->pipe->bind_tcs_state(ctx->pipe, ctx->tessctrl_shader_saved); |
||
874 | ctx->tessctrl_shader = ctx->tessctrl_shader_saved; |
||
875 | } |
||
876 | ctx->tessctrl_shader_saved = NULL; |
||
877 | } |
||
878 | |||
879 | void cso_set_tesseval_shader_handle(struct cso_context *ctx, void *handle) |
||
880 | { |
||
881 | assert(ctx->has_tessellation || !handle); |
||
882 | |||
883 | if (ctx->has_tessellation && ctx->tesseval_shader != handle) { |
||
884 | ctx->tesseval_shader = handle; |
||
885 | ctx->pipe->bind_tes_state(ctx->pipe, handle); |
||
886 | } |
||
887 | } |
||
888 | |||
889 | void cso_delete_tesseval_shader(struct cso_context *ctx, void *handle) |
||
890 | { |
||
891 | if (handle == ctx->tesseval_shader) { |
||
892 | /* unbind before deleting */ |
||
893 | ctx->pipe->bind_tes_state(ctx->pipe, NULL); |
||
894 | ctx->tesseval_shader = NULL; |
||
895 | } |
||
896 | ctx->pipe->delete_tes_state(ctx->pipe, handle); |
||
897 | } |
||
898 | |||
899 | void cso_save_tesseval_shader(struct cso_context *ctx) |
||
900 | { |
||
901 | if (!ctx->has_tessellation) { |
||
902 | return; |
||
903 | } |
||
904 | |||
905 | assert(!ctx->tesseval_shader_saved); |
||
906 | ctx->tesseval_shader_saved = ctx->tesseval_shader; |
||
907 | } |
||
908 | |||
909 | void cso_restore_tesseval_shader(struct cso_context *ctx) |
||
910 | { |
||
911 | if (!ctx->has_tessellation) { |
||
912 | return; |
||
913 | } |
||
914 | |||
915 | if (ctx->tesseval_shader_saved != ctx->tesseval_shader) { |
||
916 | ctx->pipe->bind_tes_state(ctx->pipe, ctx->tesseval_shader_saved); |
||
917 | ctx->tesseval_shader = ctx->tesseval_shader_saved; |
||
918 | } |
||
919 | ctx->tesseval_shader_saved = NULL; |
||
920 | } |
||
921 | |||
922 | /* clip state */ |
||
923 | |||
/* Copy the user clip planes from src to dst; the ucp array is the only
 * field this module tracks in pipe_clip_state.
 */
static INLINE void
clip_state_cpy(struct pipe_clip_state *dst,
               const struct pipe_clip_state *src)
{
   memcpy(dst->ucp, src->ucp, sizeof(dst->ucp));
}
||
930 | |||
/* memcmp-style comparison of the user clip planes: returns 0 when the
 * two states are equal, non-zero otherwise.
 */
static INLINE int
clip_state_cmp(const struct pipe_clip_state *a,
               const struct pipe_clip_state *b)
{
   return memcmp(a->ucp, b->ucp, sizeof(a->ucp));
}
||
937 | |||
938 | void |
||
939 | cso_set_clip(struct cso_context *ctx, |
||
940 | const struct pipe_clip_state *clip) |
||
941 | { |
||
942 | if (clip_state_cmp(&ctx->clip, clip)) { |
||
943 | clip_state_cpy(&ctx->clip, clip); |
||
944 | ctx->pipe->set_clip_state(ctx->pipe, clip); |
||
945 | } |
||
946 | } |
||
947 | |||
948 | void |
||
949 | cso_save_clip(struct cso_context *ctx) |
||
950 | { |
||
951 | clip_state_cpy(&ctx->clip_saved, &ctx->clip); |
||
952 | } |
||
953 | |||
954 | void |
||
955 | cso_restore_clip(struct cso_context *ctx) |
||
956 | { |
||
957 | if (clip_state_cmp(&ctx->clip, &ctx->clip_saved)) { |
||
958 | clip_state_cpy(&ctx->clip, &ctx->clip_saved); |
||
959 | ctx->pipe->set_clip_state(ctx->pipe, &ctx->clip_saved); |
||
960 | } |
||
961 | } |
||
962 | |||
/**
 * Bind a vertex-element layout, deduplicated through the CSO cache.
 * When u_vbuf is active the call is forwarded there instead.
 *
 * Returns PIPE_OK on success or PIPE_ERROR_OUT_OF_MEMORY if the cache
 * entry could not be allocated or inserted.
 */
enum pipe_error
cso_set_vertex_elements(struct cso_context *ctx,
                        unsigned count,
                        const struct pipe_vertex_element *states)
{
   struct u_vbuf *vbuf = ctx->vbuf;
   unsigned key_size, hash_key;
   struct cso_hash_iter iter;
   void *handle;
   struct cso_velems_state velems_state;

   if (vbuf) {
      u_vbuf_set_vertex_elements(vbuf, count, states);
      return PIPE_OK;
   }

   /* Need to include the count into the stored state data too.
    * Otherwise first few count pipe_vertex_elements could be identical
    * even if count is different, and there's no guarantee the hash would
    * be different in that case neither.
    */
   key_size = sizeof(struct pipe_vertex_element) * count + sizeof(unsigned);
   velems_state.count = count;
   memcpy(velems_state.velems, states,
          sizeof(struct pipe_vertex_element) * count);
   hash_key = cso_construct_key((void*)&velems_state, key_size);
   iter = cso_find_state_template(ctx->cache, hash_key, CSO_VELEMENTS,
                                  (void*)&velems_state, key_size);

   if (cso_hash_iter_is_null(iter)) {
      /* cache miss: create the driver object and insert it */
      struct cso_velements *cso = MALLOC(sizeof(struct cso_velements));
      if (!cso)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* copy only key_size bytes: count plus `count` elements */
      memcpy(&cso->state, &velems_state, key_size);
      cso->data = ctx->pipe->create_vertex_elements_state(ctx->pipe, count,
                                                      &cso->state.velems[0]);
      cso->delete_state =
         (cso_state_callback) ctx->pipe->delete_vertex_elements_state;
      cso->context = ctx->pipe;

      iter = cso_insert_state(ctx->cache, hash_key, CSO_VELEMENTS, cso);
      if (cso_hash_iter_is_null(iter)) {
         FREE(cso);
         return PIPE_ERROR_OUT_OF_MEMORY;
      }

      handle = cso->data;
   }
   else {
      /* cache hit: reuse the existing driver object */
      handle = ((struct cso_velements *)cso_hash_iter_data(iter))->data;
   }

   /* bind only when the layout actually changed */
   if (ctx->velements != handle) {
      ctx->velements = handle;
      ctx->pipe->bind_vertex_elements_state(ctx->pipe, handle);
   }
   return PIPE_OK;
}
||
1022 | |||
1023 | void cso_save_vertex_elements(struct cso_context *ctx) |
||
1024 | { |
||
1025 | struct u_vbuf *vbuf = ctx->vbuf; |
||
1026 | |||
1027 | if (vbuf) { |
||
1028 | u_vbuf_save_vertex_elements(vbuf); |
||
1029 | return; |
||
1030 | } |
||
1031 | |||
1032 | assert(!ctx->velements_saved); |
||
1033 | ctx->velements_saved = ctx->velements; |
||
1034 | } |
||
1035 | |||
1036 | void cso_restore_vertex_elements(struct cso_context *ctx) |
||
1037 | { |
||
1038 | struct u_vbuf *vbuf = ctx->vbuf; |
||
1039 | |||
1040 | if (vbuf) { |
||
1041 | u_vbuf_restore_vertex_elements(vbuf); |
||
1042 | return; |
||
1043 | } |
||
1044 | |||
1045 | if (ctx->velements != ctx->velements_saved) { |
||
1046 | ctx->velements = ctx->velements_saved; |
||
1047 | ctx->pipe->bind_vertex_elements_state(ctx->pipe, ctx->velements_saved); |
||
1048 | } |
||
1049 | ctx->velements_saved = NULL; |
||
1050 | } |
||
1051 | |||
1052 | /* vertex buffers */ |
||
1053 | |||
/**
 * Set vertex buffers on the pipe (or forward to u_vbuf when active),
 * mirroring whatever lands in the auxiliary slot so that
 * cso_save/restore_aux_vertex_buffer_slot() can reinstate it later.
 */
void cso_set_vertex_buffers(struct cso_context *ctx,
                            unsigned start_slot, unsigned count,
                            const struct pipe_vertex_buffer *buffers)
{
   struct u_vbuf *vbuf = ctx->vbuf;

   if (vbuf) {
      u_vbuf_set_vertex_buffers(vbuf, start_slot, count, buffers);
      return;
   }

   /* Save what's in the auxiliary slot, so that we can save and restore it
    * for meta ops. */
   if (start_slot <= ctx->aux_vertex_buffer_index &&
       start_slot+count > ctx->aux_vertex_buffer_index) {
      if (buffers) {
         const struct pipe_vertex_buffer *vb =
               buffers + (ctx->aux_vertex_buffer_index - start_slot);

         /* take the reference first; the memcpy below then writes the
          * same pointer value, so the refcount stays consistent */
         pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer,
                                 vb->buffer);
         memcpy(&ctx->aux_vertex_buffer_current, vb,
                sizeof(struct pipe_vertex_buffer));
      }
      else {
         /* NULL `buffers` unbinds: drop the mirrored reference */
         pipe_resource_reference(&ctx->aux_vertex_buffer_current.buffer,
                                 NULL);
         ctx->aux_vertex_buffer_current.user_buffer = NULL;
      }
   }

   ctx->pipe->set_vertex_buffers(ctx->pipe, start_slot, count, buffers);
}
||
1087 | |||
/**
 * Snapshot the auxiliary vertex-buffer slot so a meta op can clobber it
 * and cso_restore_aux_vertex_buffer_slot() can put it back afterwards.
 */
void cso_save_aux_vertex_buffer_slot(struct cso_context *ctx)
{
   struct u_vbuf *vbuf = ctx->vbuf;

   if (vbuf) {
      u_vbuf_save_aux_vertex_buffer_slot(vbuf);
      return;
   }

   /* take the reference first; the memcpy then copies the identical
    * pointer value along with the rest of the fields */
   pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer,
                           ctx->aux_vertex_buffer_current.buffer);
   memcpy(&ctx->aux_vertex_buffer_saved, &ctx->aux_vertex_buffer_current,
          sizeof(struct pipe_vertex_buffer));
}
||
1102 | |||
1103 | void cso_restore_aux_vertex_buffer_slot(struct cso_context *ctx) |
||
1104 | { |
||
1105 | struct u_vbuf *vbuf = ctx->vbuf; |
||
1106 | |||
1107 | if (vbuf) { |
||
1108 | u_vbuf_restore_aux_vertex_buffer_slot(vbuf); |
||
1109 | return; |
||
1110 | } |
||
1111 | |||
1112 | cso_set_vertex_buffers(ctx, ctx->aux_vertex_buffer_index, 1, |
||
1113 | &ctx->aux_vertex_buffer_saved); |
||
1114 | pipe_resource_reference(&ctx->aux_vertex_buffer_saved.buffer, NULL); |
||
1115 | } |
||
1116 | |||
/* Return the index of the auxiliary vertex-buffer slot used by meta ops. */
unsigned cso_get_aux_vertex_buffer_slot(struct cso_context *ctx)
{
   return ctx->aux_vertex_buffer_index;
}
||
1121 | |||
1122 | |||
1123 | /**************** fragment/vertex sampler view state *************************/ |
||
1124 | |||
/**
 * Look up (or create and cache) a driver sampler CSO matching templ and
 * store its handle in info->samplers[idx].  A NULL templ stores NULL.
 *
 * Returns PIPE_ERROR_OUT_OF_MEMORY when the cache entry cannot be
 * allocated or inserted, PIPE_OK otherwise.
 */
static enum pipe_error
single_sampler(struct cso_context *ctx,
               struct sampler_info *info,
               unsigned idx,
               const struct pipe_sampler_state *templ)
{
   void *handle = NULL;

   if (templ != NULL) {
      unsigned key_size = sizeof(struct pipe_sampler_state);
      unsigned hash_key = cso_construct_key((void*)templ, key_size);
      struct cso_hash_iter iter =
         cso_find_state_template(ctx->cache,
                                 hash_key, CSO_SAMPLER,
                                 (void *) templ, key_size);

      if (cso_hash_iter_is_null(iter)) {
         /* cache miss: create a new sampler CSO and insert it */
         struct cso_sampler *cso = MALLOC(sizeof(struct cso_sampler));
         if (!cso)
            return PIPE_ERROR_OUT_OF_MEMORY;

         memcpy(&cso->state, templ, sizeof(*templ));
         cso->data = ctx->pipe->create_sampler_state(ctx->pipe, &cso->state);
         cso->delete_state =
            (cso_state_callback) ctx->pipe->delete_sampler_state;
         cso->context = ctx->pipe;

         iter = cso_insert_state(ctx->cache, hash_key, CSO_SAMPLER, cso);
         if (cso_hash_iter_is_null(iter)) {
            FREE(cso);
            return PIPE_ERROR_OUT_OF_MEMORY;
         }

         handle = cso->data;
      }
      else {
         /* cache hit: reuse the existing driver object */
         handle = ((struct cso_sampler *)cso_hash_iter_data(iter))->data;
      }
   }

   info->samplers[idx] = handle;

   return PIPE_OK;
}
||
1169 | |||
1170 | enum pipe_error |
||
1171 | cso_single_sampler(struct cso_context *ctx, |
||
1172 | unsigned shader_stage, |
||
1173 | unsigned idx, |
||
1174 | const struct pipe_sampler_state *templ) |
||
1175 | { |
||
1176 | return single_sampler(ctx, &ctx->samplers[shader_stage], idx, templ); |
||
1177 | } |
||
1178 | |||
1179 | |||
1180 | |||
/**
 * Flush the shadowed sampler array for a shader stage to the driver.
 * The bind is skipped entirely when neither the count nor any handle
 * changed since the last bind (tracked in info->hw).
 */
static void
single_sampler_done(struct cso_context *ctx, unsigned shader_stage)
{
   struct sampler_info *info = &ctx->samplers[shader_stage];
   unsigned i;

   /* find highest non-null sampler */
   for (i = PIPE_MAX_SAMPLERS; i > 0; i--) {
      if (info->samplers[i - 1] != NULL)
         break;
   }

   info->nr_samplers = i;

   if (info->hw.nr_samplers != info->nr_samplers ||
       memcmp(info->hw.samplers,
              info->samplers,
              info->nr_samplers * sizeof(void *)) != 0)
   {
      memcpy(info->hw.samplers,
             info->samplers,
             info->nr_samplers * sizeof(void *));

      /* set remaining slots/pointers to null */
      for (i = info->nr_samplers; i < info->hw.nr_samplers; i++)
         info->samplers[i] = NULL;

      /* bind enough entries to cover both the old and the new count, so
       * stale tail slots get cleared with the NULLs written above */
      ctx->pipe->bind_sampler_states(ctx->pipe, shader_stage, 0,
                                     MAX2(info->nr_samplers,
                                          info->hw.nr_samplers),
                                     info->samplers);

      info->hw.nr_samplers = info->nr_samplers;
   }
}
||
1216 | |||
/* Public wrapper: push any pending sampler changes (made through
 * cso_single_sampler()) for this shader stage to the driver.
 */
void
cso_single_sampler_done(struct cso_context *ctx, unsigned shader_stage)
{
   single_sampler_done(ctx, shader_stage);
}
||
1222 | |||
1223 | |||
/*
 * If the function encounters any errors it will return the
 * last one. Done to always try to set as many samplers
 * as possible.
 */
||
1229 | enum pipe_error |
||
1230 | cso_set_samplers(struct cso_context *ctx, |
||
1231 | unsigned shader_stage, |
||
1232 | unsigned nr, |
||
1233 | const struct pipe_sampler_state **templates) |
||
1234 | { |
||
1235 | struct sampler_info *info = &ctx->samplers[shader_stage]; |
||
1236 | unsigned i; |
||
1237 | enum pipe_error temp, error = PIPE_OK; |
||
1238 | |||
1239 | /* TODO: fastpath |
||
1240 | */ |
||
1241 | |||
1242 | for (i = 0; i < nr; i++) { |
||
1243 | temp = single_sampler(ctx, info, i, templates[i]); |
||
1244 | if (temp != PIPE_OK) |
||
1245 | error = temp; |
||
1246 | } |
||
1247 | |||
1248 | for ( ; i < info->nr_samplers; i++) { |
||
1249 | temp = single_sampler(ctx, info, i, NULL); |
||
1250 | if (temp != PIPE_OK) |
||
1251 | error = temp; |
||
1252 | } |
||
1253 | |||
1254 | single_sampler_done(ctx, shader_stage); |
||
1255 | |||
1256 | return error; |
||
1257 | } |
||
1258 | |||
1259 | void |
||
1260 | cso_save_samplers(struct cso_context *ctx, unsigned shader_stage) |
||
1261 | { |
||
1262 | struct sampler_info *info = &ctx->samplers[shader_stage]; |
||
1263 | info->nr_samplers_saved = info->nr_samplers; |
||
1264 | memcpy(info->samplers_saved, info->samplers, sizeof(info->samplers)); |
||
1265 | } |
||
1266 | |||
1267 | |||
1268 | void |
||
1269 | cso_restore_samplers(struct cso_context *ctx, unsigned shader_stage) |
||
1270 | { |
||
1271 | struct sampler_info *info = &ctx->samplers[shader_stage]; |
||
1272 | info->nr_samplers = info->nr_samplers_saved; |
||
1273 | memcpy(info->samplers, info->samplers_saved, sizeof(info->samplers)); |
||
1274 | single_sampler_done(ctx, shader_stage); |
||
1275 | } |
||
1276 | |||
1277 | |||
/**
 * Bind `count` sampler views for a shader stage, referencing the new
 * views and releasing old views beyond `count`.  The driver is only
 * called when at least one slot actually changed.
 */
void
cso_set_sampler_views(struct cso_context *ctx,
                      unsigned shader_stage,
                      unsigned count,
                      struct pipe_sampler_view **views)
{
   struct sampler_info *info = &ctx->samplers[shader_stage];
   unsigned i;
   boolean any_change = FALSE;

   /* reference new views */
   for (i = 0; i < count; i++) {
      any_change |= info->views[i] != views[i];
      pipe_sampler_view_reference(&info->views[i], views[i]);
   }
   /* unref extra old views, if any */
   for (; i < info->nr_views; i++) {
      any_change |= info->views[i] != NULL;
      pipe_sampler_view_reference(&info->views[i], NULL);
   }

   /* bind the new sampler views */
   if (any_change) {
      /* cover MAX2(old, new) slots so stale tail entries are cleared */
      ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0,
                                   MAX2(info->nr_views, count),
                                   info->views);
   }

   info->nr_views = count;
}
||
1308 | |||
1309 | |||
1310 | void |
||
1311 | cso_save_sampler_views(struct cso_context *ctx, unsigned shader_stage) |
||
1312 | { |
||
1313 | struct sampler_info *info = &ctx->samplers[shader_stage]; |
||
1314 | unsigned i; |
||
1315 | |||
1316 | info->nr_views_saved = info->nr_views; |
||
1317 | |||
1318 | for (i = 0; i < info->nr_views; i++) { |
||
1319 | assert(!info->views_saved[i]); |
||
1320 | pipe_sampler_view_reference(&info->views_saved[i], info->views[i]); |
||
1321 | } |
||
1322 | } |
||
1323 | |||
1324 | |||
/**
 * Re-bind the sampler views stashed by cso_save_sampler_views().
 * Ownership of each saved reference moves back into info->views, and
 * any current views beyond the saved count are released.
 */
void
cso_restore_sampler_views(struct cso_context *ctx, unsigned shader_stage)
{
   struct sampler_info *info = &ctx->samplers[shader_stage];
   unsigned i, nr_saved = info->nr_views_saved;
   unsigned num;

   for (i = 0; i < nr_saved; i++) {
      pipe_sampler_view_reference(&info->views[i], NULL);
      /* move the reference from one pointer to another */
      info->views[i] = info->views_saved[i];
      info->views_saved[i] = NULL;
   }
   /* release current views past the saved count */
   for (; i < info->nr_views; i++) {
      pipe_sampler_view_reference(&info->views[i], NULL);
   }

   /* bind enough slots to cover both the old and restored counts */
   num = MAX2(info->nr_views, nr_saved);

   /* bind the old/saved sampler views */
   ctx->pipe->set_sampler_views(ctx->pipe, shader_stage, 0, num, info->views);

   info->nr_views = nr_saved;
   info->nr_views_saved = 0;
}
||
1350 | |||
1351 | |||
/**
 * Bind stream-output targets, referencing the new ones and releasing
 * extra old ones.  A no-op when the driver lacks streamout support (in
 * which case num_targets must be 0) or when going from zero targets to
 * zero targets.
 */
void
cso_set_stream_outputs(struct cso_context *ctx,
                       unsigned num_targets,
                       struct pipe_stream_output_target **targets,
                       const unsigned *offsets)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;

   if (!ctx->has_streamout) {
      assert(num_targets == 0);
      return;
   }

   if (ctx->nr_so_targets == 0 && num_targets == 0) {
      /* Nothing to do. */
      return;
   }

   /* reference new targets */
   for (i = 0; i < num_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
   }
   /* unref extra old targets, if any */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, num_targets, targets,
                                   offsets);
   ctx->nr_so_targets = num_targets;
}
||
1384 | |||
1385 | void |
||
1386 | cso_save_stream_outputs(struct cso_context *ctx) |
||
1387 | { |
||
1388 | uint i; |
||
1389 | |||
1390 | if (!ctx->has_streamout) { |
||
1391 | return; |
||
1392 | } |
||
1393 | |||
1394 | ctx->nr_so_targets_saved = ctx->nr_so_targets; |
||
1395 | |||
1396 | for (i = 0; i < ctx->nr_so_targets; i++) { |
||
1397 | assert(!ctx->so_targets_saved[i]); |
||
1398 | pipe_so_target_reference(&ctx->so_targets_saved[i], ctx->so_targets[i]); |
||
1399 | } |
||
1400 | } |
||
1401 | |||
/**
 * Re-bind the stream-output targets stashed by
 * cso_save_stream_outputs().  Saved references are moved back into
 * ctx->so_targets and rebound with an "append" offset; extra current
 * targets are released.
 */
void
cso_restore_stream_outputs(struct cso_context *ctx)
{
   struct pipe_context *pipe = ctx->pipe;
   uint i;
   unsigned offset[PIPE_MAX_SO_BUFFERS];

   if (!ctx->has_streamout) {
      return;
   }

   if (ctx->nr_so_targets == 0 && ctx->nr_so_targets_saved == 0) {
      /* Nothing to do. */
      return;
   }

   assert(ctx->nr_so_targets_saved <= PIPE_MAX_SO_BUFFERS);
   for (i = 0; i < ctx->nr_so_targets_saved; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
      /* move the reference from one pointer to another */
      ctx->so_targets[i] = ctx->so_targets_saved[i];
      ctx->so_targets_saved[i] = NULL;
      /* -1 means append */
      offset[i] = (unsigned)-1;
   }
   /* release current targets beyond the saved count */
   for (; i < ctx->nr_so_targets; i++) {
      pipe_so_target_reference(&ctx->so_targets[i], NULL);
   }

   pipe->set_stream_output_targets(pipe, ctx->nr_so_targets_saved,
                                   ctx->so_targets, offset);

   ctx->nr_so_targets = ctx->nr_so_targets_saved;
   ctx->nr_so_targets_saved = 0;
}
||
1437 | |||
1438 | /* constant buffers */ |
||
1439 | |||
1440 | void |
||
1441 | cso_set_constant_buffer(struct cso_context *cso, unsigned shader_stage, |
||
1442 | unsigned index, struct pipe_constant_buffer *cb) |
||
1443 | { |
||
1444 | struct pipe_context *pipe = cso->pipe; |
||
1445 | |||
1446 | pipe->set_constant_buffer(pipe, shader_stage, index, cb); |
||
1447 | |||
1448 | if (index == 0) { |
||
1449 | util_copy_constant_buffer(&cso->aux_constbuf_current[shader_stage], cb); |
||
1450 | } |
||
1451 | } |
||
1452 | |||
1453 | void |
||
1454 | cso_set_constant_buffer_resource(struct cso_context *cso, |
||
1455 | unsigned shader_stage, |
||
1456 | unsigned index, |
||
1457 | struct pipe_resource *buffer) |
||
1458 | { |
||
1459 | if (buffer) { |
||
1460 | struct pipe_constant_buffer cb; |
||
1461 | cb.buffer = buffer; |
||
1462 | cb.buffer_offset = 0; |
||
1463 | cb.buffer_size = buffer->width0; |
||
1464 | cb.user_buffer = NULL; |
||
1465 | cso_set_constant_buffer(cso, shader_stage, index, &cb); |
||
1466 | } else { |
||
1467 | cso_set_constant_buffer(cso, shader_stage, index, NULL); |
||
1468 | } |
||
1469 | } |
||
1470 | |||
/**
 * Snapshot the mirrored slot-0 constant buffer of a shader stage via
 * util_copy_constant_buffer, for a later
 * cso_restore_constant_buffer_slot0().
 */
void
cso_save_constant_buffer_slot0(struct cso_context *cso,
                               unsigned shader_stage)
{
   util_copy_constant_buffer(&cso->aux_constbuf_saved[shader_stage],
                             &cso->aux_constbuf_current[shader_stage]);
}
||
1478 | |||
/**
 * Re-bind the slot-0 constant buffer saved by
 * cso_save_constant_buffer_slot0() and release the saved buffer
 * reference.
 */
void
cso_restore_constant_buffer_slot0(struct cso_context *cso,
                                  unsigned shader_stage)
{
   cso_set_constant_buffer(cso, shader_stage, 0,
                           &cso->aux_constbuf_saved[shader_stage]);
   /* cso_set_constant_buffer re-referenced the buffer; drop ours */
   pipe_resource_reference(&cso->aux_constbuf_saved[shader_stage].buffer,
                           NULL);
}
||
1488 | |||
1489 | /* drawing */ |
||
1490 | |||
1491 | void |
||
1492 | cso_set_index_buffer(struct cso_context *cso, |
||
1493 | const struct pipe_index_buffer *ib) |
||
1494 | { |
||
1495 | struct u_vbuf *vbuf = cso->vbuf; |
||
1496 | |||
1497 | if (vbuf) { |
||
1498 | u_vbuf_set_index_buffer(vbuf, ib); |
||
1499 | } else { |
||
1500 | struct pipe_context *pipe = cso->pipe; |
||
1501 | pipe->set_index_buffer(pipe, ib); |
||
1502 | } |
||
1503 | } |
||
1504 | |||
1505 | void |
||
1506 | cso_draw_vbo(struct cso_context *cso, |
||
1507 | const struct pipe_draw_info *info) |
||
1508 | { |
||
1509 | struct u_vbuf *vbuf = cso->vbuf; |
||
1510 | |||
1511 | if (vbuf) { |
||
1512 | u_vbuf_draw_vbo(vbuf, info); |
||
1513 | } else { |
||
1514 | struct pipe_context *pipe = cso->pipe; |
||
1515 | pipe->draw_vbo(pipe, info); |
||
1516 | } |
||
1517 | } |
||
1518 | |||
1519 | void |
||
1520 | cso_draw_arrays(struct cso_context *cso, uint mode, uint start, uint count) |
||
1521 | { |
||
1522 | struct pipe_draw_info info; |
||
1523 | |||
1524 | util_draw_init_info(&info); |
||
1525 | |||
1526 | info.mode = mode; |
||
1527 | info.start = start; |
||
1528 | info.count = count; |
||
1529 | info.min_index = start; |
||
1530 | info.max_index = start + count - 1; |
||
1531 | |||
1532 | cso_draw_vbo(cso, &info); |
||
1533 | } |
||
1534 | |||
1535 | void |
||
1536 | cso_draw_arrays_instanced(struct cso_context *cso, uint mode, |
||
1537 | uint start, uint count, |
||
1538 | uint start_instance, uint instance_count) |
||
1539 | { |
||
1540 | struct pipe_draw_info info; |
||
1541 | |||
1542 | util_draw_init_info(&info); |
||
1543 | |||
1544 | info.mode = mode; |
||
1545 | info.start = start; |
||
1546 | info.count = count; |
||
1547 | info.min_index = start; |
||
1548 | info.max_index = start + count - 1; |
||
1549 | info.start_instance = start_instance; |
||
1550 | info.instance_count = instance_count; |
||
1551 | |||
1552 | cso_draw_vbo(cso, &info); |
||
1553 | }>>=>>>>>>>>>>>>=>>>>>=>=>>> |