Rev 4359 | Details | Compare with Previous | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
4304 | Serge | 1 | /* |
2 | * Copyright © 2006,2008,2011 Intel Corporation |
||
3 | * Copyright © 2007 Red Hat, Inc. |
||
4 | * |
||
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
||
6 | * copy of this software and associated documentation files (the "Software"), |
||
7 | * to deal in the Software without restriction, including without limitation |
||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
||
9 | * and/or sell copies of the Software, and to permit persons to whom the |
||
10 | * Software is furnished to do so, subject to the following conditions: |
||
11 | * |
||
12 | * The above copyright notice and this permission notice (including the next |
||
13 | * paragraph) shall be included in all copies or substantial portions of the |
||
14 | * Software. |
||
15 | * |
||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||
22 | * SOFTWARE. |
||
23 | * |
||
24 | * Authors: |
||
25 | * Wang Zhenyu |
||
26 | * Eric Anholt |
||
27 | * Carl Worth |
||
28 | * Keith Packard |
||
29 | * Chris Wilson |
||
30 | * |
||
31 | */ |
||
32 | |||
33 | #ifdef HAVE_CONFIG_H |
||
34 | #include "config.h" |
||
35 | #endif |
||
36 | |||
37 | #include "sna.h" |
||
38 | #include "sna_reg.h" |
||
39 | #include "sna_render.h" |
||
40 | #include "sna_render_inline.h" |
||
41 | //#include "sna_video.h" |
||
42 | |||
43 | #include "brw/brw.h" |
||
44 | #include "gen5_render.h" |
||
4501 | Serge | 45 | #include "gen4_common.h" |
4304 | Serge | 46 | #include "gen4_source.h" |
47 | #include "gen4_vertex.h" |
||
48 | |||
49 | #define NO_COMPOSITE 0 |
||
50 | #define NO_COMPOSITE_SPANS 0 |
||
51 | |||
52 | #define PREFER_BLT_FILL 1 |
||
53 | |||
54 | #define DBG_NO_STATE_CACHE 0 |
||
55 | #define DBG_NO_SURFACE_CACHE 0 |
||
56 | |||
57 | #define MAX_3D_SIZE 8192 |
||
58 | |||
59 | #define GEN5_GRF_BLOCKS(nreg) ((nreg + 15) / 16 - 1) |
||
60 | |||
61 | /* Set up a default static partitioning of the URB, which is supposed to |
||
62 | * allow anything we would want to do, at potentially lower performance. |
||
63 | */ |
||
64 | #define URB_CS_ENTRY_SIZE 1 |
||
65 | #define URB_CS_ENTRIES 0 |
||
66 | |||
67 | #define URB_VS_ENTRY_SIZE 1 |
||
68 | #define URB_VS_ENTRIES 256 /* minimum of 8 */ |
||
69 | |||
70 | #define URB_GS_ENTRY_SIZE 0 |
||
71 | #define URB_GS_ENTRIES 0 |
||
72 | |||
73 | #define URB_CLIP_ENTRY_SIZE 0 |
||
74 | #define URB_CLIP_ENTRIES 0 |
||
75 | |||
76 | #define URB_SF_ENTRY_SIZE 2 |
||
77 | #define URB_SF_ENTRIES 64 |
||
78 | |||
79 | /* |
||
80 | * this program computes dA/dx and dA/dy for the texture coordinates along |
||
81 | * with the base texture coordinate. It was extracted from the Mesa driver |
||
82 | */ |
||
83 | |||
84 | #define SF_KERNEL_NUM_GRF 16 |
||
85 | #define SF_MAX_THREADS 48 |
||
86 | |||
87 | #define PS_KERNEL_NUM_GRF 32 |
||
88 | #define PS_MAX_THREADS 72 |
||
89 | |||
90 | static const uint32_t ps_kernel_packed_static[][4] = { |
||
91 | #include "exa_wm_xy.g5b" |
||
92 | #include "exa_wm_src_affine.g5b" |
||
93 | #include "exa_wm_src_sample_argb.g5b" |
||
94 | #include "exa_wm_yuv_rgb.g5b" |
||
95 | #include "exa_wm_write.g5b" |
||
96 | }; |
||
97 | |||
98 | static const uint32_t ps_kernel_planar_static[][4] = { |
||
99 | #include "exa_wm_xy.g5b" |
||
100 | #include "exa_wm_src_affine.g5b" |
||
101 | #include "exa_wm_src_sample_planar.g5b" |
||
102 | #include "exa_wm_yuv_rgb.g5b" |
||
103 | #include "exa_wm_write.g5b" |
||
104 | }; |
||
105 | |||
106 | #define NOKERNEL(kernel_enum, func, masked) \ |
||
107 | [kernel_enum] = {func, 0, masked} |
||
108 | #define KERNEL(kernel_enum, kernel, masked) \ |
||
109 | [kernel_enum] = {&kernel, sizeof(kernel), masked} |
||
110 | static const struct wm_kernel_info { |
||
111 | const void *data; |
||
112 | unsigned int size; |
||
113 | bool has_mask; |
||
114 | } wm_kernels[] = { |
||
115 | NOKERNEL(WM_KERNEL, brw_wm_kernel__affine, false), |
||
116 | NOKERNEL(WM_KERNEL_P, brw_wm_kernel__projective, false), |
||
117 | |||
118 | NOKERNEL(WM_KERNEL_MASK, brw_wm_kernel__affine_mask, true), |
||
119 | NOKERNEL(WM_KERNEL_MASK_P, brw_wm_kernel__projective_mask, true), |
||
120 | |||
121 | NOKERNEL(WM_KERNEL_MASKCA, brw_wm_kernel__affine_mask_ca, true), |
||
122 | NOKERNEL(WM_KERNEL_MASKCA_P, brw_wm_kernel__projective_mask_ca, true), |
||
123 | |||
124 | NOKERNEL(WM_KERNEL_MASKSA, brw_wm_kernel__affine_mask_sa, true), |
||
125 | NOKERNEL(WM_KERNEL_MASKSA_P, brw_wm_kernel__projective_mask_sa, true), |
||
126 | |||
127 | NOKERNEL(WM_KERNEL_OPACITY, brw_wm_kernel__affine_opacity, true), |
||
128 | NOKERNEL(WM_KERNEL_OPACITY_P, brw_wm_kernel__projective_opacity, true), |
||
129 | |||
130 | KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false), |
||
131 | KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false), |
||
132 | }; |
||
133 | #undef KERNEL |
||
134 | |||
135 | static const struct blendinfo { |
||
136 | bool src_alpha; |
||
137 | uint32_t src_blend; |
||
138 | uint32_t dst_blend; |
||
139 | } gen5_blend_op[] = { |
||
140 | /* Clear */ {0, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_ZERO}, |
||
141 | /* Src */ {0, GEN5_BLENDFACTOR_ONE, GEN5_BLENDFACTOR_ZERO}, |
||
142 | /* Dst */ {0, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_ONE}, |
||
143 | /* Over */ {1, GEN5_BLENDFACTOR_ONE, GEN5_BLENDFACTOR_INV_SRC_ALPHA}, |
||
144 | /* OverReverse */ {0, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_ONE}, |
||
145 | /* In */ {0, GEN5_BLENDFACTOR_DST_ALPHA, GEN5_BLENDFACTOR_ZERO}, |
||
146 | /* InReverse */ {1, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_SRC_ALPHA}, |
||
147 | /* Out */ {0, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_ZERO}, |
||
148 | /* OutReverse */ {1, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_INV_SRC_ALPHA}, |
||
149 | /* Atop */ {1, GEN5_BLENDFACTOR_DST_ALPHA, GEN5_BLENDFACTOR_INV_SRC_ALPHA}, |
||
150 | /* AtopReverse */ {1, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_SRC_ALPHA}, |
||
151 | /* Xor */ {1, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_INV_SRC_ALPHA}, |
||
152 | /* Add */ {0, GEN5_BLENDFACTOR_ONE, GEN5_BLENDFACTOR_ONE}, |
||
153 | }; |
||
154 | |||
155 | /** |
||
156 | * Highest-valued BLENDFACTOR used in gen5_blend_op. |
||
157 | * |
||
158 | * This leaves out GEN5_BLENDFACTOR_INV_DST_COLOR, |
||
159 | * GEN5_BLENDFACTOR_INV_CONST_{COLOR,ALPHA}, |
||
160 | * GEN5_BLENDFACTOR_INV_SRC1_{COLOR,ALPHA} |
||
161 | */ |
||
162 | #define GEN5_BLENDFACTOR_COUNT (GEN5_BLENDFACTOR_INV_DST_ALPHA + 1) |
||
163 | |||
164 | #define BLEND_OFFSET(s, d) \ |
||
165 | (((s) * GEN5_BLENDFACTOR_COUNT + (d)) * 64) |
||
166 | |||
167 | #define SAMPLER_OFFSET(sf, se, mf, me, k) \ |
||
168 | ((((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * KERNEL_COUNT + (k)) * 64) |
||
169 | |||
170 | static bool |
||
171 | gen5_emit_pipelined_pointers(struct sna *sna, |
||
172 | const struct sna_composite_op *op, |
||
173 | int blend, int kernel); |
||
174 | |||
175 | #define OUT_BATCH(v) batch_emit(sna, v) |
||
176 | #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y) |
||
177 | #define OUT_VERTEX_F(v) vertex_emit(sna, v) |
||
178 | |||
179 | static inline bool too_large(int width, int height) |
||
180 | { |
||
181 | return width > MAX_3D_SIZE || height > MAX_3D_SIZE; |
||
182 | } |
||
183 | |||
184 | static int |
||
185 | gen5_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine) |
||
186 | { |
||
187 | int base; |
||
188 | |||
189 | if (has_mask) { |
||
190 | if (is_ca) { |
||
191 | if (gen5_blend_op[op].src_alpha) |
||
192 | base = WM_KERNEL_MASKSA; |
||
193 | else |
||
194 | base = WM_KERNEL_MASKCA; |
||
195 | } else |
||
196 | base = WM_KERNEL_MASK; |
||
197 | } else |
||
198 | base = WM_KERNEL; |
||
199 | |||
200 | return base + !is_affine; |
||
201 | } |
||
202 | |||
/*
 * Second pass for component-alpha rendering that needs two blend steps.
 *
 * When op->need_magic_ca_pass is set, the vertices already emitted for the
 * first (CA) pass are replayed with a PictOpAdd pipeline: the pipelined
 * pointers are switched to the Add blend/kernel, then a 3DPRIMITIVE is
 * emitted re-reading the same vertex range [vertex_start, vertex_index).
 *
 * Returns true if the fixup pass was emitted, false if none was needed.
 * NOTE(review): caller is responsible for having batch space for the
 * ~20 dwords this can emit (see gen5_rectangle_begin's ndwords).
 */
static bool gen5_magic_ca_pass(struct sna *sna,
                               const struct sna_composite_op *op)
{
    struct gen5_render_state *state = &sna->render_state.gen5;

    if (!op->need_magic_ca_pass)
        return false;

    /* There must be vertices pending from the first pass to replay. */
    assert(sna->render.vertex_index > sna->render.vertex_start);

    DBG(("%s: CA fixup\n", __FUNCTION__));
    assert(op->mask.bo != NULL);
    assert(op->has_component_alpha);

    /* Repoint the pipeline at the Add blend state and matching CA kernel. */
    gen5_emit_pipelined_pointers
        (sna, op, PictOpAdd,
         gen5_choose_composite_kernel(PictOpAdd,
                                      true, true, op->is_affine));

    OUT_BATCH(GEN5_3DPRIMITIVE |
              GEN5_3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << GEN5_3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    /* Replay exactly the vertex range emitted by the first pass. */
    OUT_BATCH(sna->render.vertex_index - sna->render.vertex_start);
    OUT_BATCH(sna->render.vertex_start);
    OUT_BATCH(1);    /* single instance */
    OUT_BATCH(0);    /* start instance location */
    OUT_BATCH(0);    /* index buffer offset, ignored */

    state->last_primitive = sna->kgem.nbatch;
    return true;
}
||
236 | |||
/*
 * Return the byte offset of the pre-baked CC (color-calculator/blend) state
 * for this operator within the blend-state array laid out by BLEND_OFFSET.
 *
 * NOTE(review): in this port the blend factors are hardcoded to
 * ONE/INV_SRC_ALPHA (i.e. PictOpOver) and the generic per-operator /
 * per-format adjustment below is compiled out with #if 0 — presumably
 * only Over is ever used here; confirm against callers before enabling.
 */
static uint32_t gen5_get_blend(int op,
                               bool has_component_alpha,
                               uint32_t dst_format)
{
    uint32_t src, dst;

    src = GEN5_BLENDFACTOR_ONE; //gen6_blend_op[op].src_blend;
    dst = GEN5_BLENDFACTOR_INV_SRC_ALPHA; //gen6_blend_op[op].dst_blend;
#if 0
    /* If there's no dst alpha channel, adjust the blend op so that we'll treat
     * it as always 1.
     */
    if (PICT_FORMAT_A(dst_format) == 0) {
        if (src == GEN5_BLENDFACTOR_DST_ALPHA)
            src = GEN5_BLENDFACTOR_ONE;
        else if (src == GEN5_BLENDFACTOR_INV_DST_ALPHA)
            src = GEN5_BLENDFACTOR_ZERO;
    }

    /* If the source alpha is being used, then we should only be in a
     * case where the source blend factor is 0, and the source blend
     * value is the mask channels multiplied by the source picture's alpha.
     */
    if (has_component_alpha && gen5_blend_op[op].src_alpha) {
        if (dst == GEN5_BLENDFACTOR_SRC_ALPHA)
            dst = GEN5_BLENDFACTOR_SRC_COLOR;
        else if (dst == GEN5_BLENDFACTOR_INV_SRC_ALPHA)
            dst = GEN5_BLENDFACTOR_INV_SRC_COLOR;
    }
#endif

    DBG(("blend op=%d, dst=%x [A=%d] => src=%d, dst=%d => offset=%x\n",
         op, dst_format, PICT_FORMAT_A(dst_format),
         src, dst, BLEND_OFFSET(src, dst)));
    return BLEND_OFFSET(src, dst);
}
||
273 | |||
274 | static uint32_t gen5_get_card_format(PictFormat format) |
||
275 | { |
||
276 | switch (format) { |
||
277 | default: |
||
278 | return -1; |
||
279 | case PICT_a8r8g8b8: |
||
280 | return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM; |
||
281 | case PICT_x8r8g8b8: |
||
282 | return GEN5_SURFACEFORMAT_B8G8R8X8_UNORM; |
||
283 | case PICT_a8b8g8r8: |
||
284 | return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM; |
||
285 | case PICT_x8b8g8r8: |
||
286 | return GEN5_SURFACEFORMAT_R8G8B8X8_UNORM; |
||
287 | case PICT_a2r10g10b10: |
||
288 | return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM; |
||
289 | case PICT_x2r10g10b10: |
||
290 | return GEN5_SURFACEFORMAT_B10G10R10X2_UNORM; |
||
291 | case PICT_r8g8b8: |
||
292 | return GEN5_SURFACEFORMAT_R8G8B8_UNORM; |
||
293 | case PICT_r5g6b5: |
||
294 | return GEN5_SURFACEFORMAT_B5G6R5_UNORM; |
||
295 | case PICT_a1r5g5b5: |
||
296 | return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM; |
||
297 | case PICT_a8: |
||
298 | return GEN5_SURFACEFORMAT_A8_UNORM; |
||
299 | case PICT_a4r4g4b4: |
||
300 | return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM; |
||
301 | } |
||
302 | } |
||
303 | |||
/*
 * Map a PictFormat to the render-target SURFACEFORMAT.
 *
 * NOTE(review): this port unconditionally renders as B8G8R8A8_UNORM; the
 * full per-format mapping below is compiled out with #if 0.  That is only
 * safe if every destination really is 32bpp ARGB — confirm against the
 * callers before relying on other formats.
 */
static uint32_t gen5_get_dest_format(PictFormat format)
{
    return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
#if 0
    switch (format) {
    default:
        return -1;
    case PICT_a8r8g8b8:
    case PICT_x8r8g8b8:
        return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
    case PICT_a8b8g8r8:
    case PICT_x8b8g8r8:
        return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM;
    case PICT_a2r10g10b10:
    case PICT_x2r10g10b10:
        return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
    case PICT_r5g6b5:
        return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
    case PICT_x1r5g5b5:
    case PICT_a1r5g5b5:
        return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
    case PICT_a8:
        return GEN5_SURFACEFORMAT_A8_UNORM;
    case PICT_a4r4g4b4:
    case PICT_x4r4g4b4:
        return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM;
    }
#endif
}
||
/* SURFACE_STATE padded out to 32 bytes so entries in the surface-state
 * area keep a fixed stride (see the sizeof() arithmetic in gen5_bind_bo
 * and gen5_composite_get_binding_table). */
typedef struct gen5_surface_state_padded {
    struct gen5_surface_state state;
    char pad[32 - sizeof(struct gen5_surface_state)];
} gen5_surface_state_padded;
||
337 | |||
/* Reserve a 64-byte, 64-aligned block of zeros in the static state stream.
 * All-zero state doubles as the legacy border color and the (disabled)
 * depth-stencil state. */
static void null_create(struct sna_static_stream *stream)
{
    /* A bunch of zeros useful for legacy border color and depth-stencil */
    sna_static_stream_map(stream, 64, 64);
}
||
343 | |||
344 | static void |
||
345 | sampler_state_init(struct gen5_sampler_state *sampler_state, |
||
346 | sampler_filter_t filter, |
||
347 | sampler_extend_t extend) |
||
348 | { |
||
349 | sampler_state->ss0.lod_preclamp = 1; /* GL mode */ |
||
350 | |||
351 | /* We use the legacy mode to get the semantics specified by |
||
352 | * the Render extension. */ |
||
353 | sampler_state->ss0.border_color_mode = GEN5_BORDER_COLOR_MODE_LEGACY; |
||
354 | |||
355 | switch (filter) { |
||
356 | default: |
||
357 | case SAMPLER_FILTER_NEAREST: |
||
358 | sampler_state->ss0.min_filter = GEN5_MAPFILTER_NEAREST; |
||
359 | sampler_state->ss0.mag_filter = GEN5_MAPFILTER_NEAREST; |
||
360 | break; |
||
361 | case SAMPLER_FILTER_BILINEAR: |
||
362 | sampler_state->ss0.min_filter = GEN5_MAPFILTER_LINEAR; |
||
363 | sampler_state->ss0.mag_filter = GEN5_MAPFILTER_LINEAR; |
||
364 | break; |
||
365 | } |
||
366 | |||
367 | switch (extend) { |
||
368 | default: |
||
369 | case SAMPLER_EXTEND_NONE: |
||
370 | sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_CLAMP_BORDER; |
||
371 | sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_CLAMP_BORDER; |
||
372 | sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_CLAMP_BORDER; |
||
373 | break; |
||
374 | case SAMPLER_EXTEND_REPEAT: |
||
375 | sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_WRAP; |
||
376 | sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_WRAP; |
||
377 | sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_WRAP; |
||
378 | break; |
||
379 | case SAMPLER_EXTEND_PAD: |
||
380 | sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_CLAMP; |
||
381 | sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_CLAMP; |
||
382 | sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_CLAMP; |
||
383 | break; |
||
384 | case SAMPLER_EXTEND_REFLECT: |
||
385 | sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_MIRROR; |
||
386 | sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_MIRROR; |
||
387 | sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_MIRROR; |
||
388 | break; |
||
389 | } |
||
390 | } |
||
391 | |||
392 | static uint32_t |
||
393 | gen5_tiling_bits(uint32_t tiling) |
||
394 | { |
||
395 | switch (tiling) { |
||
396 | default: assert(0); |
||
397 | case I915_TILING_NONE: return 0; |
||
398 | case I915_TILING_X: return GEN5_SURFACE_TILED; |
||
399 | case I915_TILING_Y: return GEN5_SURFACE_TILED | GEN5_SURFACE_TILED_Y; |
||
400 | } |
||
401 | } |
||
402 | |||
/**
 * Sets up the common fields for a surface state buffer for the given
 * picture in the given surface state buffer.
 *
 * Returns the byte offset of the SURFACE_STATE entry within the batch's
 * surface-state area (offset * sizeof(uint32_t)).  Bindings are cached per
 * (format, is_dst) on the bo so repeated binds within a batch are free.
 */
static uint32_t
gen5_bind_bo(struct sna *sna,
             struct kgem_bo *bo,
             uint32_t width,
             uint32_t height,
             uint32_t format,
             bool is_dst)
{
    uint32_t domains;
    uint16_t offset;
    uint32_t *ss;

    /* After the first bind, we manage the cache domains within the batch */
    if (!DBG_NO_SURFACE_CACHE) {
        /* is_dst is folded into the cache key so the same bo can be
         * bound both as source and destination with distinct state. */
        offset = kgem_bo_get_binding(bo, format | is_dst << 31);
        if (offset) {
            if (is_dst)
                kgem_bo_mark_dirty(bo);
            return offset * sizeof(uint32_t);
        }
    }

    /* Surface state grows downwards from the end of the batch buffer. */
    offset = sna->kgem.surface -=
        sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
    ss = sna->kgem.batch + offset;

    ss[0] = (GEN5_SURFACE_2D << GEN5_SURFACE_TYPE_SHIFT |
             GEN5_SURFACE_BLEND_ENABLED |
             format << GEN5_SURFACE_FORMAT_SHIFT);

    if (is_dst) {
        ss[0] |= GEN5_SURFACE_RC_READ_WRITE;
        domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
    } else
        domains = I915_GEM_DOMAIN_SAMPLER << 16;
    /* ss[1] holds the bo address, patched via the relocation table. */
    ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);

    /* Width/height/pitch are encoded minus one, per hardware format. */
    ss[2] = ((width - 1) << GEN5_SURFACE_WIDTH_SHIFT |
             (height - 1) << GEN5_SURFACE_HEIGHT_SHIFT);
    ss[3] = (gen5_tiling_bits(bo->tiling) |
             (bo->pitch - 1) << GEN5_SURFACE_PITCH_SHIFT);
    ss[4] = 0;
    ss[5] = 0;

    kgem_bo_set_binding(bo, format | is_dst << 31, offset);

    DBG(("[%x] bind bo(handle=%d, addr=%d), format=%d, width=%d, height=%d, pitch=%d, tiling=%d -> %s\n",
         offset, bo->handle, ss[1],
         format, width, height, bo->pitch, bo->tiling,
         domains & 0xffff ? "render" : "sampler"));

    return offset * sizeof(uint32_t);
}
||
460 | |||
/*
 * Emit 3DSTATE_VERTEX_BUFFERS binding the render vbo at slot 'id'
 * (the op's vertex-element id).  The buffer address dword is written as 0
 * and its batch position recorded in vertex_reloc[] so it can be patched
 * once the vbo is finalised.  vb_id tracks which slots are already bound
 * in this batch.
 */
static void gen5_emit_vertex_buffer(struct sna *sna,
                                    const struct sna_composite_op *op)
{
    int id = op->u.gen5.ve_id;

    assert((sna->render.vb_id & (1 << id)) == 0);

    OUT_BATCH(GEN5_3DSTATE_VERTEX_BUFFERS | 3);
    /* Pitch is in bytes: 4 bytes per float per vertex. */
    OUT_BATCH(id << VB0_BUFFER_INDEX_SHIFT | VB0_VERTEXDATA |
              (4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT));
    assert(sna->render.nvertex_reloc < ARRAY_SIZE(sna->render.vertex_reloc));
    sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
    OUT_BATCH(0);       /* address, patched later via vertex_reloc */
    OUT_BATCH(~0);      /* max address: disabled */
    OUT_BATCH(0);

    sna->render.vb_id |= 1 << id;
}
||
479 | |||
/*
 * Open a 3DPRIMITIVE (RECTLIST, sequential vertices) packet whose vertex
 * count is filled in later at render.vertex_offset by the vertex flush.
 * If the last thing emitted was already such a packet, simply reopen it
 * (its count dword sits 5 dwords back) instead of emitting a new one.
 */
static void gen5_emit_primitive(struct sna *sna)
{
    if (sna->kgem.nbatch == sna->render_state.gen5.last_primitive) {
        /* Continue accumulating vertices into the previous primitive. */
        sna->render.vertex_offset = sna->kgem.nbatch - 5;
        return;
    }

    OUT_BATCH(GEN5_3DPRIMITIVE |
              GEN5_3DPRIMITIVE_VERTEX_SEQUENTIAL |
              (_3DPRIM_RECTLIST << GEN5_3DPRIMITIVE_TOPOLOGY_SHIFT) |
              (0 << 9) |
              4);
    sna->render.vertex_offset = sna->kgem.nbatch;
    OUT_BATCH(0);    /* vertex count, to be filled in later */
    OUT_BATCH(sna->render.vertex_index);
    OUT_BATCH(1);    /* single instance */
    OUT_BATCH(0);    /* start instance location */
    OUT_BATCH(0);    /* index buffer offset, ignored */
    sna->render.vertex_start = sna->render.vertex_index;

    sna->render_state.gen5.last_primitive = sna->kgem.nbatch;
}
||
502 | |||
/*
 * Ensure a vertex buffer is bound and a primitive is open before emitting
 * rectangles for 'op'.  Returns false if the batch lacks room for the
 * worst case (primitive + possible CA fixup, plus the vertex-buffer
 * packet if this ve_id is not yet bound), in which case the caller must
 * flush and retry.
 */
static bool gen5_rectangle_begin(struct sna *sna,
                                 const struct sna_composite_op *op)
{
    int id = op->u.gen5.ve_id;
    int ndwords;

    /* Another thread may have reopened the primitive while we waited. */
    if (sna_vertex_wait__locked(&sna->render) && sna->render.vertex_offset)
        return true;

    /* 6 dwords for the primitive; 20 if a CA second pass may follow. */
    ndwords = op->need_magic_ca_pass ? 20 : 6;
    if ((sna->render.vb_id & (1 << id)) == 0)
        ndwords += 5;    /* plus the 3DSTATE_VERTEX_BUFFERS packet */

    if (!kgem_check_batch(&sna->kgem, ndwords))
        return false;

    if ((sna->render.vb_id & (1 << id)) == 0)
        gen5_emit_vertex_buffer(sna, op);
    if (sna->render.vertex_offset == 0)
        gen5_emit_primitive(sna);

    return true;
}
||
526 | |||
/*
 * Slow path for gen5_get_rectangles: the vbo is full.  Try to reclaim
 * space, closing the current primitive (and running the CA fixup pass)
 * if necessary.  Returns the number of floats now available, or 0 if the
 * batch itself must be submitted first.
 */
static int gen5_get_rectangles__flush(struct sna *sna,
                                      const struct sna_composite_op *op)
{
    /* Preventing discarding new vbo after lock contention */
    if (sna_vertex_wait__locked(&sna->render)) {
        int rem = vertex_space(sna);
        if (rem > op->floats_per_rect)
            return rem;
    }

    /* Need room to close the primitive (and maybe the CA pass)... */
    if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 20 : 6))
        return 0;
    /* ...and relocation entries for rebinding the new vbo. */
    if (!kgem_check_reloc_and_exec(&sna->kgem, 2))
        return 0;

    if (sna->render.vertex_offset) {
        gen4_vertex_flush(sna);
        /* The CA pass changed pipeline state; restore op's own state. */
        if (gen5_magic_ca_pass(sna, op))
            gen5_emit_pipelined_pointers(sna, op, op->op,
                                         op->u.gen5.wm_kernel);
    }

    return gen4_vertex_finish(sna);
}
||
551 | |||
/*
 * Reserve vertex space for up to 'want' rectangles of op, returning how
 * many were actually granted (>= 1).  On any shortage it flushes the vbo
 * and/or submits the batch, re-emits state via emit_state(), and retries;
 * it does not return 0.  Advances vertex_index by 3 per rectangle
 * (RECTLIST: three vertices per rect).
 */
inline static int gen5_get_rectangles(struct sna *sna,
                                      const struct sna_composite_op *op,
                                      int want,
                                      void (*emit_state)(struct sna *sna,
                                                         const struct sna_composite_op *op))
{
    int rem;

    assert(want);

start:
    rem = vertex_space(sna);
    if (unlikely(rem < op->floats_per_rect)) {
        DBG(("flushing vbo for %s: %d < %d\n",
             __FUNCTION__, rem, op->floats_per_rect));
        rem = gen5_get_rectangles__flush(sna, op);
        if (unlikely (rem == 0))
            goto flush;    /* batch full: submit and start over */
    }

    if (unlikely(sna->render.vertex_offset == 0)) {
        if (!gen5_rectangle_begin(sna, op))
            goto flush;    /* no room for primitive setup */
        else
            goto start;    /* re-check space after (re)opening */
    }

    assert(rem <= vertex_space(sna));
    assert(op->floats_per_rect <= rem);
    /* Trim the request to what fits in the remaining vertex space. */
    if (want > 1 && want * op->floats_per_rect > rem)
        want = rem / op->floats_per_rect;

    sna->render.vertex_index += 3*want;
    return want;

flush:
    /* Close out pending vertices, submit the batch and rebuild state. */
    if (sna->render.vertex_offset) {
        gen4_vertex_flush(sna);
        gen5_magic_ca_pass(sna, op);
    }
    sna_vertex_wait__locked(&sna->render);
    _kgem_submit(&sna->kgem);
    emit_state(sna, op);
    goto start;
}
||
597 | |||
/*
 * Carve a zeroed binding-table slot out of the surface-state area
 * (which grows down from the end of the batch).  Stores the dword
 * offset in *offset and returns a pointer to the cleared entries for
 * the caller to fill with surface-state offsets.
 */
static uint32_t *
gen5_composite_get_binding_table(struct sna *sna,
                                 uint16_t *offset)
{
    sna->kgem.surface -=
        sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);

    DBG(("%s(%x)\n", __FUNCTION__, 4*sna->kgem.surface));

    /* Clear all surplus entries to zero in case of prefetch */
    *offset = sna->kgem.surface;
    return memset(sna->kgem.batch + sna->kgem.surface,
                  0, sizeof(struct gen5_surface_state_padded));
}
||
612 | |||
/*
 * Emit URB_FENCE partitioning the URB between the fixed-function stages
 * using the static URB_*_ENTRIES/URB_*_ENTRY_SIZE layout defined at the
 * top of this file (VS | GS | CLIP | SF | CS, laid out back to back),
 * followed by CS_URB_STATE describing the constant buffer.
 */
static void
gen5_emit_urb(struct sna *sna)
{
    int urb_vs_start, urb_vs_size;
    int urb_gs_start, urb_gs_size;
    int urb_clip_start, urb_clip_size;
    int urb_sf_start, urb_sf_size;
    int urb_cs_start, urb_cs_size;

    /* Each region begins where the previous one ends. */
    urb_vs_start = 0;
    urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
    urb_gs_start = urb_vs_start + urb_vs_size;
    urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
    urb_clip_start = urb_gs_start + urb_gs_size;
    urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
    urb_sf_start = urb_clip_start + urb_clip_size;
    urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
    urb_cs_start = urb_sf_start + urb_sf_size;
    urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

    OUT_BATCH(GEN5_URB_FENCE |
              UF0_CS_REALLOC |
              UF0_SF_REALLOC |
              UF0_CLIP_REALLOC |
              UF0_GS_REALLOC |
              UF0_VS_REALLOC |
              1);
    /* Fences are the end offsets of each region. */
    OUT_BATCH(((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
              ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
              ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
    OUT_BATCH(((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
              ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));

    /* Constant buffer state */
    OUT_BATCH(GEN5_CS_URB_STATE | 0);
    OUT_BATCH((URB_CS_ENTRY_SIZE - 1) << 4 | URB_CS_ENTRIES << 0);
}
||
650 | |||
/*
 * Emit STATE_BASE_ADDRESS: general and instruction bases point at the
 * static-state bo (general_bo); the surface base is the batch itself
 * (NULL reloc target), since surface state lives at the end of the
 * batch buffer.  Upper-bound addresses are left disabled.
 */
static void
gen5_emit_state_base_address(struct sna *sna)
{
    assert(sna->render_state.gen5.general_bo->proxy == NULL);
    OUT_BATCH(GEN5_STATE_BASE_ADDRESS | 6);
    OUT_BATCH(kgem_add_reloc(&sna->kgem, /* general */
                             sna->kgem.nbatch,
                             sna->render_state.gen5.general_bo,
                             I915_GEM_DOMAIN_INSTRUCTION << 16,
                             BASE_ADDRESS_MODIFY));
    OUT_BATCH(kgem_add_reloc(&sna->kgem, /* surface */
                             sna->kgem.nbatch,
                             NULL,
                             I915_GEM_DOMAIN_INSTRUCTION << 16,
                             BASE_ADDRESS_MODIFY));
    OUT_BATCH(0); /* media */
    OUT_BATCH(kgem_add_reloc(&sna->kgem, /* instruction */
                             sna->kgem.nbatch,
                             sna->render_state.gen5.general_bo,
                             I915_GEM_DOMAIN_INSTRUCTION << 16,
                             BASE_ADDRESS_MODIFY));

    /* upper bounds, all disabled */
    OUT_BATCH(BASE_ADDRESS_MODIFY);
    OUT_BATCH(0);
    OUT_BATCH(BASE_ADDRESS_MODIFY);
}
||
678 | |||
/*
 * Emit the once-per-batch invariant state: select the 3D pipeline and
 * program the state base addresses.  Clears the needs_invariant flag.
 */
static void
gen5_emit_invariant(struct sna *sna)
{
    /* Ironlake errata workaround: Before disabling the clipper,
     * you have to MI_FLUSH to get the pipeline idle.
     *
     * However, the kernel flushes the pipeline between batches,
     * so we should be safe....
     *
     * On the other hand, after using BLT we must use a non-pipelined
     * operation...
     */
    if (sna->kgem.nreloc)
        OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH);

    OUT_BATCH(GEN5_PIPELINE_SELECT | PIPELINE_SELECT_3D);

    gen5_emit_state_base_address(sna);

    sna->render_state.gen5.needs_invariant = false;
}
||
700 | |||
/*
 * Prepare the batch for a render operation: switch to RENDER mode,
 * submit the current batch if it cannot hold a worst-case operation
 * (150 dwords + 4 surface-state entries), and (re-)emit the invariant
 * state if a fresh batch was started.
 */
static void
gen5_get_batch(struct sna *sna, const struct sna_composite_op *op)
{
    kgem_set_mode(&sna->kgem, KGEM_RENDER, op->dst.bo);

    if (!kgem_check_batch_with_surfaces(&sna->kgem, 150, 4)) {
        DBG(("%s: flushing batch: %d < %d+%d\n",
             __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
             150, 4*8));
        kgem_submit(&sna->kgem);
        _kgem_set_mode(&sna->kgem, KGEM_RENDER);
    }

    if (sna->render_state.gen5.needs_invariant)
        gen5_emit_invariant(sna);
}
||
717 | |||
/*
 * Realign the vertex buffer when the vertex stride changes between
 * operations, so new vertices start on a floats_per_vertex boundary.
 */
static void
gen5_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
    /* RECTLIST: always three vertices per rectangle. */
    assert(op->floats_per_rect == 3*op->floats_per_vertex);
    if (op->floats_per_vertex != sna->render_state.gen5.floats_per_vertex) {
        DBG(("aligning vertex: was %d, now %d floats per vertex\n",
             sna->render_state.gen5.floats_per_vertex,
             op->floats_per_vertex));
        gen4_vertex_align(sna, op);
        sna->render_state.gen5.floats_per_vertex = op->floats_per_vertex;
    }
}
||
730 | |||
/*
 * Point the pixel shader at the binding table at dword offset 'offset'.
 * Skipped entirely if it is already the current table (unless the state
 * cache is disabled for debugging).  Only the WM/PS slot is used; the
 * other fixed-function stages get a NULL table.
 */
static void
gen5_emit_binding_table(struct sna *sna, uint16_t offset)
{
    if (!DBG_NO_STATE_CACHE &&
        sna->render_state.gen5.surface_table == offset)
        return;

    sna->render_state.gen5.surface_table = offset;

    /* Binding table pointers */
    OUT_BATCH(GEN5_3DSTATE_BINDING_TABLE_POINTERS | 4);
    OUT_BATCH(0);    /* vs */
    OUT_BATCH(0);    /* gs */
    OUT_BATCH(0);    /* clip */
    OUT_BATCH(0);    /* sf */
    /* Only the PS uses the binding table */
    OUT_BATCH(offset*4);    /* dword offset -> byte offset */
}
||
749 | |||
/*
 * Emit 3DSTATE_PIPELINED_POINTERS selecting the pre-baked VS/GS/CLIP/SF/
 * WM/CC state for this op's sampler combination (sp), blend (bp) and
 * mask usage.  The full selection is packed into 'key' and cached so an
 * identical request emits nothing.  Also re-emits the URB fence, which
 * is required after changing pipelined state.
 *
 * Returns true iff the CC (blend) pointer actually changed from the
 * previously emitted state.
 */
static bool
gen5_emit_pipelined_pointers(struct sna *sna,
                             const struct sna_composite_op *op,
                             int blend, int kernel)
{
    uint16_t sp, bp;
    uint32_t key;

    DBG(("%s: has_mask=%d, src=(%d, %d), mask=(%d, %d),kernel=%d, blend=%d, ca=%d, format=%x\n",
         __FUNCTION__, op->u.gen5.ve_id & 2,
         op->src.filter, op->src.repeat,
         op->mask.filter, op->mask.repeat,
         kernel, blend, op->has_component_alpha, (int)op->dst.format));

    sp = SAMPLER_OFFSET(op->src.filter, op->src.repeat,
                        op->mask.filter, op->mask.repeat,
                        kernel);
    bp = gen5_get_blend(blend, op->has_component_alpha, op->dst.format);

    /* key: sampler offset | blend offset | has-mask bit. */
    key = sp | (uint32_t)bp << 16 | (op->mask.bo != NULL) << 31;
    DBG(("%s: sp=%d, bp=%d, key=%08x (current sp=%d, bp=%d, key=%08x)\n",
         __FUNCTION__, sp, bp, key,
         sna->render_state.gen5.last_pipelined_pointers & 0xffff,
         (sna->render_state.gen5.last_pipelined_pointers >> 16) & 0x7fff,
         sna->render_state.gen5.last_pipelined_pointers));
    if (key == sna->render_state.gen5.last_pipelined_pointers)
        return false;

    OUT_BATCH(GEN5_3DSTATE_PIPELINED_POINTERS | 5);
    OUT_BATCH(sna->render_state.gen5.vs);
    OUT_BATCH(GEN5_GS_DISABLE); /* passthrough */
    OUT_BATCH(GEN5_CLIP_DISABLE); /* passthrough */
    OUT_BATCH(sna->render_state.gen5.sf[op->mask.bo != NULL]);
    OUT_BATCH(sna->render_state.gen5.wm + sp);
    OUT_BATCH(sna->render_state.gen5.cc + bp);

    /* bp is reused as the "blend state changed?" return flag. */
    bp = (sna->render_state.gen5.last_pipelined_pointers & 0x7fff0000) != ((uint32_t)bp << 16);
    sna->render_state.gen5.last_pipelined_pointers = key;

    gen5_emit_urb(sna);

    return bp;
}
||
793 | |||
/*
 * Emit 3DSTATE_DRAWING_RECTANGLE covering the destination, offset by the
 * destination origin.  Cached: returns false (nothing emitted) if the
 * current drawing rectangle already matches, true if it was (re)emitted.
 */
static bool
gen5_emit_drawing_rectangle(struct sna *sna, const struct sna_composite_op *op)
{
    /* Both max-corner and origin are packed as y16:x16. */
    uint32_t limit = (op->dst.height - 1) << 16 | (op->dst.width - 1);
    uint32_t offset = (uint16_t)op->dst.y << 16 | (uint16_t)op->dst.x;

    assert(!too_large(op->dst.x, op->dst.y));
    assert(!too_large(op->dst.width, op->dst.height));

    if (!DBG_NO_STATE_CACHE &&
        sna->render_state.gen5.drawrect_limit == limit &&
        sna->render_state.gen5.drawrect_offset == offset)
        return false;

    sna->render_state.gen5.drawrect_offset = offset;
    sna->render_state.gen5.drawrect_limit = limit;

    OUT_BATCH(GEN5_3DSTATE_DRAWING_RECTANGLE | (4 - 2));
    OUT_BATCH(0x00000000);    /* top-left: (0, 0) */
    OUT_BATCH(limit);
    OUT_BATCH(offset);
    return true;
}
||
817 | |||
818 | static void |
||
819 | gen5_emit_vertex_elements(struct sna *sna, |
||
820 | const struct sna_composite_op *op) |
||
821 | { |
||
822 | /* |
||
823 | * vertex data in vertex buffer |
||
824 | * position: (x, y) |
||
825 | * texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0) |
||
826 | * texture coordinate 1 if (has_mask is true): same as above |
||
827 | */ |
||
828 | struct gen5_render_state *render = &sna->render_state.gen5; |
||
829 | int id = op->u.gen5.ve_id; |
||
830 | bool has_mask = id >> 2; |
||
831 | uint32_t format, dw; |
||
832 | |||
833 | if (!DBG_NO_STATE_CACHE && render->ve_id == id) |
||
834 | return; |
||
835 | |||
836 | DBG(("%s: changing %d -> %d\n", __FUNCTION__, render->ve_id, id)); |
||
837 | render->ve_id = id; |
||
838 | |||
839 | /* The VUE layout |
||
840 | * dword 0-3: pad (0.0, 0.0, 0.0. 0.0) |
||
841 | * dword 4-7: position (x, y, 1.0, 1.0), |
||
842 | * dword 8-11: texture coordinate 0 (u0, v0, w0, 1.0) |
||
843 | * dword 12-15: texture coordinate 1 (u1, v1, w1, 1.0) |
||
844 | * |
||
845 | * dword 4-15 are fetched from vertex buffer |
||
846 | */ |
||
847 | OUT_BATCH(GEN5_3DSTATE_VERTEX_ELEMENTS | |
||
848 | ((2 * (has_mask ? 4 : 3)) + 1 - 2)); |
||
849 | |||
850 | OUT_BATCH((id << VE0_VERTEX_BUFFER_INDEX_SHIFT) | VE0_VALID | |
||
851 | (GEN5_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT) | |
||
852 | (0 << VE0_OFFSET_SHIFT)); |
||
853 | OUT_BATCH((VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT) | |
||
854 | (VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT) | |
||
855 | (VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT) | |
||
856 | (VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT)); |
||
857 | |||
858 | /* x,y */ |
||
859 | OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID | |
||
860 | GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT | |
||
861 | |||
862 | OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT | |
||
863 | VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT | |
||
864 | VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT | |
||
865 | VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT); |
||
866 | |||
867 | /* u0, v0, w0 */ |
||
868 | DBG(("%s: id=%d, first channel %d floats, offset=4b\n", __FUNCTION__, |
||
869 | id, id & 3)); |
||
870 | dw = VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT; |
||
871 | switch (id & 3) { |
||
872 | default: |
||
873 | assert(0); |
||
874 | case 0: |
||
875 | format = GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT; |
||
876 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT; |
||
877 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT; |
||
878 | dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT; |
||
879 | break; |
||
880 | case 1: |
||
881 | format = GEN5_SURFACEFORMAT_R32_FLOAT << VE0_FORMAT_SHIFT; |
||
882 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT; |
||
883 | dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT; |
||
884 | dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT; |
||
885 | break; |
||
886 | case 2: |
||
887 | format = GEN5_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT; |
||
888 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT; |
||
889 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT; |
||
890 | dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT; |
||
891 | break; |
||
892 | case 3: |
||
893 | format = GEN5_SURFACEFORMAT_R32G32B32_FLOAT << VE0_FORMAT_SHIFT; |
||
894 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT; |
||
895 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT; |
||
896 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT; |
||
897 | break; |
||
898 | } |
||
899 | OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID | |
||
900 | format | 4 << VE0_OFFSET_SHIFT); |
||
901 | OUT_BATCH(dw); |
||
902 | |||
903 | /* u1, v1, w1 */ |
||
904 | if (has_mask) { |
||
905 | unsigned offset = 4 + ((id & 3) ?: 1) * sizeof(float); |
||
906 | DBG(("%s: id=%x, second channel %d floats, offset=%db\n", __FUNCTION__, |
||
907 | id, id >> 2, offset)); |
||
908 | dw = VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT; |
||
909 | switch (id >> 2) { |
||
910 | case 1: |
||
911 | format = GEN5_SURFACEFORMAT_R32_FLOAT << VE0_FORMAT_SHIFT; |
||
912 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT; |
||
913 | dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT; |
||
914 | dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT; |
||
915 | break; |
||
916 | default: |
||
917 | assert(0); |
||
918 | case 2: |
||
919 | format = GEN5_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT; |
||
920 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT; |
||
921 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT; |
||
922 | dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT; |
||
923 | break; |
||
924 | case 3: |
||
925 | format = GEN5_SURFACEFORMAT_R32G32B32_FLOAT << VE0_FORMAT_SHIFT; |
||
926 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT; |
||
927 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT; |
||
928 | dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT; |
||
929 | break; |
||
930 | } |
||
931 | OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID | |
||
932 | format | offset << VE0_OFFSET_SHIFT); |
||
933 | OUT_BATCH(dw); |
||
934 | } |
||
935 | } |
||
936 | |||
/* Flush the render pipeline between dependent operations.  The full
 * PIPE_CONTROL write-cache flush is disabled here in favour of a
 * lighter MI_FLUSH that inhibits the render-cache flush. */
inline static void
gen5_emit_pipe_flush(struct sna *sna)
{
#if 0
	OUT_BATCH(GEN5_PIPE_CONTROL | (4 - 2));
	OUT_BATCH(GEN5_PIPE_CONTROL_WC_FLUSH);
	OUT_BATCH(0);
	OUT_BATCH(0);
#else
	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH);
#endif
}
949 | |||
/* Emit all render state needed before drawing with this composite op.
 * The low bit of @offset flags that the destination bo was already dirty
 * (see gen5_bind_surfaces, which passes "offset | dirty"); the remaining
 * bits are the binding-table offset. */
static void
gen5_emit_state(struct sna *sna,
		const struct sna_composite_op *op,
		uint16_t offset)
{
	bool flush = false;

	assert(op->dst.bo->exec);

	/* drawrect must be first for Ironlake BLT workaround */
	if (gen5_emit_drawing_rectangle(sna, op))
		offset &= ~1;	/* fresh drawrect: no flush needed for dst reuse */
	gen5_emit_binding_table(sna, offset & ~1);
	if (gen5_emit_pipelined_pointers(sna, op, op->op, op->u.gen5.wm_kernel)){
		DBG(("%s: changed blend state, flush required? %d\n",
		     __FUNCTION__, (offset & 1) && op->op > PictOpSrc));
		/* A blend-state change while the (dirty) dst may still be
		 * read requires a pipeline flush for non-Src ops. */
		flush = (offset & 1) && op->op > PictOpSrc;
	}
	gen5_emit_vertex_elements(sna, op);

	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
		DBG(("%s: flushing dirty (%d, %d)\n", __FUNCTION__,
		     kgem_bo_is_dirty(op->src.bo),
		     kgem_bo_is_dirty(op->mask.bo)));
		/* Texture sources were written earlier in this batch; the
		 * MI_FLUSH below also covers the pending blend flush. */
		OUT_BATCH(MI_FLUSH);
		kgem_clear_dirty(&sna->kgem);
		kgem_bo_mark_dirty(op->dst.bo);
		flush = false;
	}
	if (flush) {
		DBG(("%s: forcing flush\n", __FUNCTION__));
		gen5_emit_pipe_flush(sna);
	}
}
||
984 | |||
/* Build the binding table (dst, src, optional mask) for a composite op
 * and emit any state that changed.  If the freshly written table is
 * identical to the previous one, the new slot is released and the old
 * table is reused to save batch space. */
static void gen5_bind_surfaces(struct sna *sna,
			       const struct sna_composite_op *op)
{
	bool dirty = kgem_bo_is_dirty(op->dst.bo);
	uint32_t *binding_table;
	uint16_t offset;

	gen5_get_batch(sna, op);

	binding_table = gen5_composite_get_binding_table(sna, &offset);

	binding_table[0] =
		gen5_bind_bo(sna,
			     op->dst.bo, op->dst.width, op->dst.height,
			     gen5_get_dest_format(op->dst.format),
			     true);
	binding_table[1] =
		gen5_bind_bo(sna,
			     op->src.bo, op->src.width, op->src.height,
			     op->src.card_format,
			     false);
	if (op->mask.bo) {
		assert(op->u.gen5.ve_id >> 2);	/* ve_id must include a mask channel */
		binding_table[2] =
			gen5_bind_bo(sna,
				     op->mask.bo,
				     op->mask.width,
				     op->mask.height,
				     op->mask.card_format,
				     false);
	}

	/* Compare the first two entries as a single 64-bit word, and the
	 * mask entry separately; on a full match, roll back the surface
	 * allocation and point at the previous table instead. */
	if (sna->kgem.surface == offset &&
	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table &&
	    (op->mask.bo == NULL ||
	     sna->kgem.batch[sna->render_state.gen5.surface_table+2] == binding_table[2])) {
		sna->kgem.surface += sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
		offset = sna->render_state.gen5.surface_table;
	}

	gen5_emit_state(sna, op, offset | dirty);	/* low bit: dst was dirty */
}
||
1027 | |||
/* Emit a single composite rectangle: reserve vertex space (rebinding
 * surfaces if the batch wrapped) and hand off to the op-specific
 * vertex emitter. */
fastcall static void
gen5_render_composite_blt(struct sna *sna,
			  const struct sna_composite_op *op,
			  const struct sna_composite_rectangles *r)
{
	DBG(("%s: src=(%d, %d)+(%d, %d), mask=(%d, %d)+(%d, %d), dst=(%d, %d)+(%d, %d), size=(%d, %d)\n",
	     __FUNCTION__,
	     r->src.x, r->src.y, op->src.offset[0], op->src.offset[1],
	     r->mask.x, r->mask.y, op->mask.offset[0], op->mask.offset[1],
	     r->dst.x, r->dst.y, op->dst.x, op->dst.y,
	     r->width, r->height));

	gen5_get_rectangles(sna, op, 1, gen5_bind_surfaces);
	op->prim_emit(sna, op, r);
}
||
1043 | |||
1044 | #if 0 |
||
1045 | fastcall static void |
||
1046 | gen5_render_composite_box(struct sna *sna, |
||
1047 | const struct sna_composite_op *op, |
||
1048 | const BoxRec *box) |
||
1049 | { |
||
1050 | struct sna_composite_rectangles r; |
||
1051 | |||
1052 | DBG((" %s: (%d, %d), (%d, %d)\n", |
||
1053 | __FUNCTION__, |
||
1054 | box->x1, box->y1, box->x2, box->y2)); |
||
1055 | |||
1056 | gen5_get_rectangles(sna, op, 1, gen5_bind_surfaces); |
||
1057 | |||
1058 | r.dst.x = box->x1; |
||
1059 | r.dst.y = box->y1; |
||
1060 | r.width = box->x2 - box->x1; |
||
1061 | r.height = box->y2 - box->y1; |
||
1062 | r.mask = r.src = r.dst; |
||
1063 | |||
1064 | op->prim_emit(sna, op, &r); |
||
1065 | } |
||
1066 | |||
/* Emit many boxes via the per-rectangle prim_emit path, batching as many
 * rectangles per gen5_get_rectangles() reservation as will fit. */
static void
gen5_render_composite_boxes__blt(struct sna *sna,
				 const struct sna_composite_op *op,
				 const BoxRec *box, int nbox)
{
	DBG(("%s(%d) delta=(%d, %d), src=(%d, %d)/(%d, %d), mask=(%d, %d)/(%d, %d)\n",
	     __FUNCTION__, nbox, op->dst.x, op->dst.y,
	     op->src.offset[0], op->src.offset[1],
	     op->src.width, op->src.height,
	     op->mask.offset[0], op->mask.offset[1],
	     op->mask.width, op->mask.height));

	do {
		int nbox_this_time;

		/* May return fewer than requested if the vertex buffer fills. */
		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
						     gen5_bind_surfaces);
		nbox -= nbox_this_time;

		do {
			struct sna_composite_rectangles r;

			DBG((" %s: (%d, %d), (%d, %d)\n",
			     __FUNCTION__,
			     box->x1, box->y1, box->x2, box->y2));

			r.dst.x = box->x1;
			r.dst.y = box->y1;
			r.width = box->x2 - box->x1;
			r.height = box->y2 - box->y1;
			r.mask = r.src = r.dst;
			op->prim_emit(sna, op, &r);
			box++;
		} while (--nbox_this_time);
	} while (nbox);
}
||
1103 | |||
/* Emit many boxes using the op's bulk emit_boxes vertex generator,
 * writing vertices directly into the reserved region. */
static void
gen5_render_composite_boxes(struct sna *sna,
			    const struct sna_composite_op *op,
			    const BoxRec *box, int nbox)
{
	DBG(("%s: nbox=%d\n", __FUNCTION__, nbox));

	do {
		int nbox_this_time;
		float *v;

		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
						     gen5_bind_surfaces);
		assert(nbox_this_time);
		nbox -= nbox_this_time;

		/* Claim the vertex space up front, then let emit_boxes fill it. */
		v = sna->render.vertices + sna->render.vertex_used;
		sna->render.vertex_used += nbox_this_time * op->floats_per_rect;

		op->emit_boxes(op, box, nbox_this_time, v);
		box += nbox_this_time;
	} while (nbox);
}
||
1127 | |||
/* Threaded variant of gen5_render_composite_boxes: the vertex lock is
 * held while reserving space, then dropped (with the region acquired)
 * so emit_boxes can run concurrently with other emitters. */
static void
gen5_render_composite_boxes__thread(struct sna *sna,
				    const struct sna_composite_op *op,
				    const BoxRec *box, int nbox)
{
	DBG(("%s: nbox=%d\n", __FUNCTION__, nbox));

	sna_vertex_lock(&sna->render);
	do {
		int nbox_this_time;
		float *v;

		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
						     gen5_bind_surfaces);
		assert(nbox_this_time);
		nbox -= nbox_this_time;

		v = sna->render.vertices + sna->render.vertex_used;
		sna->render.vertex_used += nbox_this_time * op->floats_per_rect;

		/* Pin the reservation, then emit outside the lock. */
		sna_vertex_acquire__locked(&sna->render);
		sna_vertex_unlock(&sna->render);

		op->emit_boxes(op, box, nbox_this_time, v);
		box += nbox_this_time;

		sna_vertex_lock(&sna->render);
		sna_vertex_release__locked(&sna->render);
	} while (nbox);
	sna_vertex_unlock(&sna->render);
}
||
1159 | |||
#ifndef MAX
/* NOTE: classic max macro — evaluates one argument twice; do not pass
 * expressions with side effects. */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif
||
1163 | |||
1164 | static uint32_t gen5_bind_video_source(struct sna *sna, |
||
1165 | struct kgem_bo *src_bo, |
||
1166 | uint32_t src_offset, |
||
1167 | int src_width, |
||
1168 | int src_height, |
||
1169 | int src_pitch, |
||
1170 | uint32_t src_surf_format) |
||
1171 | { |
||
1172 | struct gen5_surface_state *ss; |
||
1173 | |||
1174 | sna->kgem.surface -= sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t); |
||
1175 | |||
1176 | ss = memset(sna->kgem.batch + sna->kgem.surface, 0, sizeof(*ss)); |
||
1177 | ss->ss0.surface_type = GEN5_SURFACE_2D; |
||
1178 | ss->ss0.surface_format = src_surf_format; |
||
1179 | ss->ss0.color_blend = 1; |
||
1180 | |||
1181 | ss->ss1.base_addr = |
||
1182 | kgem_add_reloc(&sna->kgem, |
||
1183 | sna->kgem.surface + 1, |
||
1184 | src_bo, |
||
1185 | I915_GEM_DOMAIN_SAMPLER << 16, |
||
1186 | src_offset); |
||
1187 | |||
1188 | ss->ss2.width = src_width - 1; |
||
1189 | ss->ss2.height = src_height - 1; |
||
1190 | ss->ss3.pitch = src_pitch - 1; |
||
1191 | |||
1192 | return sna->kgem.surface * sizeof(uint32_t); |
||
1193 | } |
||
1194 | |||
/* Bind the destination plus every source plane of a video frame.
 * Planar formats bind six R8 views (Y twice, then V, V, U, U); packed
 * formats bind a single YCrCb view. */
static void gen5_video_bind_surfaces(struct sna *sna,
				     const struct sna_composite_op *op)
{
	bool dirty = kgem_bo_is_dirty(op->dst.bo);
	struct sna_video_frame *frame = op->priv;
	uint32_t src_surf_format;
	uint32_t src_surf_base[6];
	int src_width[6];
	int src_height[6];
	int src_pitch[6];
	uint32_t *binding_table;
	uint16_t offset;
	int n_src, n;

	/* Plane base offsets within the frame bo (Y at 0, then V, then U). */
	src_surf_base[0] = 0;
	src_surf_base[1] = 0;
	src_surf_base[2] = frame->VBufOffset;
	src_surf_base[3] = frame->VBufOffset;
	src_surf_base[4] = frame->UBufOffset;
	src_surf_base[5] = frame->UBufOffset;

	if (is_planar_fourcc(frame->id)) {
		src_surf_format = GEN5_SURFACEFORMAT_R8_UNORM;
		src_width[1] = src_width[0] = frame->width;
		src_height[1] = src_height[0] = frame->height;
		src_pitch[1] = src_pitch[0] = frame->pitch[1];
		/* Chroma planes are subsampled to half size in each axis. */
		src_width[4] = src_width[5] = src_width[2] = src_width[3] =
			frame->width / 2;
		src_height[4] = src_height[5] = src_height[2] = src_height[3] =
			frame->height / 2;
		src_pitch[4] = src_pitch[5] = src_pitch[2] = src_pitch[3] =
			frame->pitch[0];
		n_src = 6;
	} else {
		if (frame->id == FOURCC_UYVY)
			src_surf_format = GEN5_SURFACEFORMAT_YCRCB_SWAPY;
		else
			src_surf_format = GEN5_SURFACEFORMAT_YCRCB_NORMAL;

		src_width[0] = frame->width;
		src_height[0] = frame->height;
		src_pitch[0] = frame->pitch[0];
		n_src = 1;
	}

	gen5_get_batch(sna, op);

	binding_table = gen5_composite_get_binding_table(sna, &offset);
	binding_table[0] =
		gen5_bind_bo(sna,
			     op->dst.bo, op->dst.width, op->dst.height,
			     gen5_get_dest_format(op->dst.format),
			     true);
	for (n = 0; n < n_src; n++) {
		binding_table[1+n] =
			gen5_bind_video_source(sna,
					       frame->bo,
					       src_surf_base[n],
					       src_width[n],
					       src_height[n],
					       src_pitch[n],
					       src_surf_format);
	}

	gen5_emit_state(sna, op, offset | dirty);	/* low bit: dst was dirty */
}
||
1261 | |||
/* Render a video frame onto @pixmap, one textured rectangle per clip box
 * in @dstRegion.  Returns false if the pixmap cannot be moved to the GPU
 * or the batch cannot accommodate the operation. */
static bool
gen5_render_video(struct sna *sna,
		  struct sna_video *video,
		  struct sna_video_frame *frame,
		  RegionPtr dstRegion,
		  PixmapPtr pixmap)
{
	struct sna_composite_op tmp;
	int dst_width = dstRegion->extents.x2 - dstRegion->extents.x1;
	int dst_height = dstRegion->extents.y2 - dstRegion->extents.y1;
	int src_width = frame->src.x2 - frame->src.x1;
	int src_height = frame->src.y2 - frame->src.y1;
	float src_offset_x, src_offset_y;
	float src_scale_x, src_scale_y;
	int nbox, pix_xoff, pix_yoff;
	struct sna_pixmap *priv;
	BoxPtr box;

	DBG(("%s: %dx%d -> %dx%d\n", __FUNCTION__,
	     src_width, src_height, dst_width, dst_height));

	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
	if (priv == NULL)
		return false;

	memset(&tmp, 0, sizeof(tmp));

	tmp.op = PictOpSrc;
	tmp.dst.pixmap = pixmap;
	tmp.dst.width = pixmap->drawable.width;
	tmp.dst.height = pixmap->drawable.height;
	tmp.dst.format = sna_format_for_depth(pixmap->drawable.depth);
	tmp.dst.bo = priv->gpu_bo;

	/* 1:1 scaling can use nearest filtering; otherwise bilinear. */
	if (src_width == dst_width && src_height == dst_height)
		tmp.src.filter = SAMPLER_FILTER_NEAREST;
	else
		tmp.src.filter = SAMPLER_FILTER_BILINEAR;
	tmp.src.repeat = SAMPLER_EXTEND_PAD;
	tmp.src.bo = frame->bo;
	tmp.mask.bo = NULL;
	tmp.u.gen5.wm_kernel =
		is_planar_fourcc(frame->id) ? WM_KERNEL_VIDEO_PLANAR : WM_KERNEL_VIDEO_PACKED;
	tmp.u.gen5.ve_id = 2;	/* affine (u, v) texture coordinates */
	tmp.is_affine = true;
	tmp.floats_per_vertex = 3;
	tmp.floats_per_rect = 9;
	tmp.priv = frame;

	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
			return false;
	}

	gen5_align_vertex(sna, &tmp);
	gen5_video_bind_surfaces(sna, &tmp);

	/* Set up the offset for translating from the given region (in screen
	 * coordinates) to the backing pixmap.
	 */
#ifdef COMPOSITE
	pix_xoff = -pixmap->screen_x + pixmap->drawable.x;
	pix_yoff = -pixmap->screen_y + pixmap->drawable.y;
#else
	pix_xoff = 0;
	pix_yoff = 0;
#endif

	/* Texture-coordinate scale/offset mapping dst pixels to the
	 * normalized source rectangle within the frame. */
	src_scale_x = (float)src_width / dst_width / frame->width;
	src_offset_x = (float)frame->src.x1 / frame->width - dstRegion->extents.x1 * src_scale_x;

	src_scale_y = (float)src_height / dst_height / frame->height;
	src_offset_y = (float)frame->src.y1 / frame->height - dstRegion->extents.y1 * src_scale_y;

	box = REGION_RECTS(dstRegion);
	nbox = REGION_NUM_RECTS(dstRegion);
	while (nbox--) {
		BoxRec r;

		r.x1 = box->x1 + pix_xoff;
		r.x2 = box->x2 + pix_xoff;
		r.y1 = box->y1 + pix_yoff;
		r.y2 = box->y2 + pix_yoff;

		gen5_get_rectangles(sna, &tmp, 1, gen5_video_bind_surfaces);

		/* Rectangle as three vertices: bottom-right, bottom-left,
		 * top-left (hardware derives the fourth). */
		OUT_VERTEX(r.x2, r.y2);
		OUT_VERTEX_F(box->x2 * src_scale_x + src_offset_x);
		OUT_VERTEX_F(box->y2 * src_scale_y + src_offset_y);

		OUT_VERTEX(r.x1, r.y2);
		OUT_VERTEX_F(box->x1 * src_scale_x + src_offset_x);
		OUT_VERTEX_F(box->y2 * src_scale_y + src_offset_y);

		OUT_VERTEX(r.x1, r.y1);
		OUT_VERTEX_F(box->x1 * src_scale_x + src_offset_x);
		OUT_VERTEX_F(box->y1 * src_scale_y + src_offset_y);

		if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
			sna_damage_add_box(&priv->gpu_damage, &r);
			sna_damage_subtract_box(&priv->cpu_damage, &r);
		}
		box++;
	}

	gen4_vertex_flush(sna);
	return true;
}
||
1371 | #endif |
||
1372 | |||
/* Finish a composite operation: flush any pending vertices and run the
 * second pass needed for component-alpha ("magic CA") rendering. */
static void
gen5_render_composite_done(struct sna *sna,
			   const struct sna_composite_op *op)
{
	if (sna->render.vertex_offset) {
		gen4_vertex_flush(sna);
		gen5_magic_ca_pass(sna,op);
	}

	DBG(("%s()\n", __FUNCTION__));

}
||
1385 | |||
1386 | #if 0 |
||
/* Resolve the destination drawable for a composite op: pick a GPU bo,
 * record the drawable->pixmap delta, and redirect through a temporary
 * if the pixmap exceeds the hardware limits.  Returns false when no
 * usable GPU target exists. */
static bool
gen5_composite_set_target(struct sna *sna,
			  struct sna_composite_op *op,
			  PicturePtr dst,
			  int x, int y, int w, int h,
			  bool partial)
{
	BoxRec box;

	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
	op->dst.width = op->dst.pixmap->drawable.width;
	op->dst.height = op->dst.pixmap->drawable.height;
	op->dst.format = dst->format;
	/* Use the explicit extents when given, else the whole picture. */
	if (w && h) {
		box.x1 = x;
		box.y1 = y;
		box.x2 = x + w;
		box.y2 = y + h;
	} else
		sna_render_picture_extents(dst, &box);

	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
					  &box, &op->damage);
	if (op->dst.bo == NULL)
		return false;

	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
			    &op->dst.x, &op->dst.y);

	DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d),damage=%p\n",
	     __FUNCTION__,
	     op->dst.pixmap, (int)op->dst.format,
	     op->dst.width, op->dst.height,
	     op->dst.bo->pitch,
	     op->dst.x, op->dst.y,
	     op->damage ? *op->damage : (void *)-1));

	assert(op->dst.bo->proxy == NULL);

	if (too_large(op->dst.width, op->dst.height) &&
	    !sna_render_composite_redirect(sna, op, x, y, w, h, partial))
		return false;

	return true;
}
||
1433 | |||
/* Prepare a composite operation, filling @tmp with the chosen kernels,
 * emitters and bound channels.  Tries the BLT engine first, falls back
 * to tiling for oversized operations, and unwinds via the cleanup_*
 * labels on failure.  Returns true when @tmp is ready for use. */
static bool
gen5_render_composite(struct sna *sna,
		      uint8_t op,
		      PicturePtr src,
		      PicturePtr mask,
		      PicturePtr dst,
		      int16_t src_x, int16_t src_y,
		      int16_t msk_x, int16_t msk_y,
		      int16_t dst_x, int16_t dst_y,
		      int16_t width, int16_t height,
		      struct sna_composite_op *tmp)
{
	DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
	     width, height, sna->kgem.mode));

	if (op >= ARRAY_SIZE(gen5_blend_op)) {
		DBG(("%s: unhandled blend op %d\n", __FUNCTION__, op));
		return false;
	}

	/* Unmasked operations may be cheaper on the BLT ring. */
	if (mask == NULL &&
	    sna_blt_composite(sna, op,
			      src, dst,
			      src_x, src_y,
			      dst_x, dst_y,
			      width, height,
			      tmp, false))
		return true;

	if (gen5_composite_fallback(sna, src, mask, dst))
		return false;

	if (need_tiling(sna, width, height))
		return sna_tiling_composite(op, src, mask, dst,
					    src_x, src_y,
					    msk_x, msk_y,
					    dst_x, dst_y,
					    width, height,
					    tmp);

	if (!gen5_composite_set_target(sna, tmp, dst,
				       dst_x, dst_y, width, height,
				       op > PictOpSrc || dst->pCompositeClip->data)) {
		DBG(("%s: failed to set composite target\n", __FUNCTION__));
		return false;
	}

	DBG(("%s: preparing source\n", __FUNCTION__));
	tmp->op = op;
	/* gen5_composite_picture: -1 = failure, 0 = use solid fixup, 1 = ok */
	switch (gen5_composite_picture(sna, src, &tmp->src,
				       src_x, src_y,
				       width, height,
				       dst_x, dst_y,
				       dst->polyMode == PolyModePrecise)) {
	case -1:
		DBG(("%s: failed to prepare source picture\n", __FUNCTION__));
		goto cleanup_dst;
	case 0:
		if (!gen4_channel_init_solid(sna, &tmp->src, 0))
			goto cleanup_dst;
		/* fall through to fixup */
	case 1:
		if (mask == NULL &&
		    sna_blt_composite__convert(sna,
					       dst_x, dst_y, width, height,
					       tmp))
			return true;

		gen5_composite_channel_convert(&tmp->src);
		break;
	}

	tmp->is_affine = tmp->src.is_affine;
	tmp->has_component_alpha = false;
	tmp->need_magic_ca_pass = false;

	if (mask) {
		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
			tmp->has_component_alpha = true;

			/* Check if it's component alpha that relies on a source alpha and on
			 * the source value. We can only get one of those into the single
			 * source value that we get to blend with.
			 */
			if (gen5_blend_op[op].src_alpha &&
			    (gen5_blend_op[op].src_blend != GEN5_BLENDFACTOR_ZERO)) {
				if (op != PictOpOver) {
					DBG(("%s: unhandled CA blend op %d\n", __FUNCTION__, op));
					goto cleanup_src;
				}

				/* OutReverse now, the Add pass follows in
				 * gen5_magic_ca_pass at done time. */
				tmp->need_magic_ca_pass = true;
				tmp->op = PictOpOutReverse;
			}
		}

		if (!reuse_source(sna,
				  src, &tmp->src, src_x, src_y,
				  mask, &tmp->mask, msk_x, msk_y)) {
			DBG(("%s: preparing mask\n", __FUNCTION__));
			switch (gen5_composite_picture(sna, mask, &tmp->mask,
						       msk_x, msk_y,
						       width, height,
						       dst_x, dst_y,
						       dst->polyMode == PolyModePrecise)) {
			case -1:
				DBG(("%s: failed to prepare mask picture\n", __FUNCTION__));
				goto cleanup_src;
			case 0:
				if (!gen4_channel_init_solid(sna, &tmp->mask, 0))
					goto cleanup_src;
				/* fall through to fixup */
			case 1:
				gen5_composite_channel_convert(&tmp->mask);
				break;
			}
		}

		tmp->is_affine &= tmp->mask.is_affine;
	}

	tmp->u.gen5.wm_kernel =
		gen5_choose_composite_kernel(tmp->op,
					     tmp->mask.bo != NULL,
					     tmp->has_component_alpha,
					     tmp->is_affine);
	tmp->u.gen5.ve_id = gen4_choose_composite_emitter(sna, tmp);

	tmp->blt   = gen5_render_composite_blt;
	tmp->box   = gen5_render_composite_box;
	tmp->boxes = gen5_render_composite_boxes__blt;
	if (tmp->emit_boxes) {
		tmp->boxes = gen5_render_composite_boxes;
		tmp->thread_boxes = gen5_render_composite_boxes__thread;
	}
	tmp->done  = gen5_render_composite_done;

	if (!kgem_check_bo(&sna->kgem,
			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem,
				   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL))
			goto cleanup_mask;
	}

	gen5_align_vertex(sna, tmp);
	gen5_bind_surfaces(sna, tmp);
	return true;

cleanup_mask:
	if (tmp->mask.bo)
		kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
cleanup_src:
	if (tmp->src.bo)
		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
cleanup_dst:
	if (tmp->redirect.real_bo)
		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
	return false;
}
||
1594 | |||
1595 | #if !NO_COMPOSITE_SPANS |
||
/* Emit a single opacity-weighted span box via the span prim emitter. */
fastcall static void
gen5_render_composite_spans_box(struct sna *sna,
				const struct sna_composite_spans_op *op,
				const BoxRec *box, float opacity)
{
	DBG(("%s: src=+(%d, %d), opacity=%f, dst=+(%d, %d), box=(%d, %d) x (%d, %d)\n",
	     __FUNCTION__,
	     op->base.src.offset[0], op->base.src.offset[1],
	     opacity,
	     op->base.dst.x, op->base.dst.y,
	     box->x1, box->y1,
	     box->x2 - box->x1,
	     box->y2 - box->y1));

	gen5_get_rectangles(sna, &op->base, 1, gen5_bind_surfaces);
	op->prim_emit(sna, op, box, opacity);
}
||
1613 | |||
/* Emit many span boxes sharing one opacity, batching rectangle
 * reservations like the composite boxes path. */
static void
gen5_render_composite_spans_boxes(struct sna *sna,
				  const struct sna_composite_spans_op *op,
				  const BoxRec *box, int nbox,
				  float opacity)
{
	DBG(("%s: nbox=%d, src=+(%d, %d), opacity=%f, dst=+(%d, %d)\n",
	     __FUNCTION__, nbox,
	     op->base.src.offset[0], op->base.src.offset[1],
	     opacity,
	     op->base.dst.x, op->base.dst.y));

	do {
		int nbox_this_time;

		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox,
						     gen5_bind_surfaces);
		nbox -= nbox_this_time;

		do {
			DBG(("  %s: (%d, %d) x (%d, %d)\n", __FUNCTION__,
			     box->x1, box->y1,
			     box->x2 - box->x1,
			     box->y2 - box->y1));

			op->prim_emit(sna, op, box++, opacity);
		} while (--nbox_this_time);
	} while (nbox);
}
||
1643 | |||
/* Threaded span emission: reserve vertex space under the vertex lock,
 * then run the bulk emit_boxes outside the lock (same protocol as
 * gen5_render_composite_boxes__thread). */
fastcall static void
gen5_render_composite_spans_boxes__thread(struct sna *sna,
					  const struct sna_composite_spans_op *op,
					  const struct sna_opacity_box *box,
					  int nbox)
{
	DBG(("%s: nbox=%d, src=+(%d, %d), dst=+(%d, %d)\n",
	     __FUNCTION__, nbox,
	     op->base.src.offset[0], op->base.src.offset[1],
	     op->base.dst.x, op->base.dst.y));

	sna_vertex_lock(&sna->render);
	do {
		int nbox_this_time;
		float *v;

		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox,
						     gen5_bind_surfaces);
		assert(nbox_this_time);
		nbox -= nbox_this_time;

		v = sna->render.vertices + sna->render.vertex_used;
		sna->render.vertex_used += nbox_this_time * op->base.floats_per_rect;

		/* Pin the reservation, then emit outside the lock. */
		sna_vertex_acquire__locked(&sna->render);
		sna_vertex_unlock(&sna->render);

		op->emit_boxes(op, box, nbox_this_time, v);
		box += nbox_this_time;

		sna_vertex_lock(&sna->render);
		sna_vertex_release__locked(&sna->render);
	} while (nbox);
	sna_vertex_unlock(&sna->render);
}
||
1679 | |||
/* Finish a span-compositing operation: flush any vertices still pending
 * in the open primitive, drop our reference on the source bo, and undo
 * any destination redirection set up by the composite path. */
fastcall static void
gen5_render_composite_spans_done(struct sna *sna,
				 const struct sna_composite_spans_op *op)
{
	/* A non-zero vertex_offset means a primitive is still open. */
	if (sna->render.vertex_offset)
		gen4_vertex_flush(sna);

	DBG(("%s()\n", __FUNCTION__));

	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
	sna_render_composite_redirect_done(sna, &op->base);
}
||
1692 | |||
1693 | static bool |
||
1694 | gen5_check_composite_spans(struct sna *sna, |
||
1695 | uint8_t op, PicturePtr src, PicturePtr dst, |
||
1696 | int16_t width, int16_t height, |
||
1697 | unsigned flags) |
||
1698 | { |
||
1699 | DBG(("%s: op=%d, width=%d, height=%d, flags=%x\n", |
||
1700 | __FUNCTION__, op, width, height, flags)); |
||
1701 | |||
1702 | if (op >= ARRAY_SIZE(gen5_blend_op)) |
||
1703 | return false; |
||
1704 | |||
1705 | if (gen5_composite_fallback(sna, src, NULL, dst)) { |
||
1706 | DBG(("%s: operation would fallback\n", __FUNCTION__)); |
||
1707 | return false; |
||
1708 | } |
||
1709 | |||
1710 | if (need_tiling(sna, width, height) && |
||
1711 | !is_gpu(sna, dst->pDrawable, PREFER_GPU_SPANS)) { |
||
1712 | DBG(("%s: fallback, tiled operation not on GPU\n", |
||
1713 | __FUNCTION__)); |
||
1714 | return false; |
||
1715 | } |
||
1716 | |||
1717 | if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0) { |
||
1718 | struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable); |
||
1719 | assert(priv); |
||
1720 | |||
1721 | if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) |
||
1722 | return true; |
||
1723 | |||
1724 | if (flags & COMPOSITE_SPANS_INPLACE_HINT) |
||
1725 | return false; |
||
1726 | |||
1727 | if ((sna->render.prefer_gpu & PREFER_GPU_SPANS) == 0 && |
||
1728 | dst->format == PICT_a8) |
||
1729 | return false; |
||
1730 | |||
1731 | return priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo); |
||
1732 | } |
||
1733 | |||
1734 | return true; |
||
1735 | } |
||
1736 | |||
/* Prepare a span-compositing operation in *tmp.
 *
 * Sets up the destination target and source channel, chooses the span
 * emitter and WM kernel, installs the box/boxes/done callbacks, and
 * reserves batch space.  Returns true on success; on failure all
 * references taken so far are released via the goto-cleanup chain.
 */
static bool
gen5_render_composite_spans(struct sna *sna,
			    uint8_t op,
			    PicturePtr src,
			    PicturePtr dst,
			    int16_t src_x, int16_t src_y,
			    int16_t dst_x, int16_t dst_y,
			    int16_t width, int16_t height,
			    unsigned flags,
			    struct sna_composite_spans_op *tmp)
{
	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
	     width, height, flags, sna->kgem.ring));

	assert(gen5_check_composite_spans(sna, op, src, dst, width, height, flags));

	/* Too large for a single 3D pass: split into tiles. */
	if (need_tiling(sna, width, height)) {
		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
		     __FUNCTION__, width, height));
		return sna_tiling_composite_spans(op, src, dst,
						  src_x, src_y, dst_x, dst_y,
						  width, height, flags, tmp);
	}

	tmp->base.op = op;
	if (!gen5_composite_set_target(sna, &tmp->base, dst,
				       dst_x, dst_y, width, height,
				       true))
		return false;

	/* -1 = error, 0 = clear (use solid), 1 = channel ready. */
	switch (gen5_composite_picture(sna, src, &tmp->base.src,
				       src_x, src_y,
				       width, height,
				       dst_x, dst_y,
				       dst->polyMode == PolyModePrecise)) {
	case -1:
		goto cleanup_dst;
	case 0:
		if (!gen4_channel_init_solid(sna, &tmp->base.src, 0))
			goto cleanup_dst;
		/* fall through to fixup */
	case 1:
		gen5_composite_channel_convert(&tmp->base.src);
		break;
	}

	/* Spans carry opacity in the vertices; no mask channel. */
	tmp->base.mask.bo = NULL;

	tmp->base.is_affine = tmp->base.src.is_affine;
	tmp->base.has_component_alpha = false;
	tmp->base.need_magic_ca_pass = false;

	tmp->base.u.gen5.ve_id = gen4_choose_spans_emitter(sna, tmp);
	/* OPACITY kernel; +1 selects the projective variant. */
	tmp->base.u.gen5.wm_kernel = WM_KERNEL_OPACITY | !tmp->base.is_affine;

	tmp->box = gen5_render_composite_spans_box;
	tmp->boxes = gen5_render_composite_spans_boxes;
	if (tmp->emit_boxes)
		tmp->thread_boxes = gen5_render_composite_spans_boxes__thread;
	tmp->done = gen5_render_composite_spans_done;

	/* Ensure both bos fit in the aperture; retry once after a flush. */
	if (!kgem_check_bo(&sna->kgem,
			   tmp->base.dst.bo, tmp->base.src.bo,
			   NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem,
				   tmp->base.dst.bo, tmp->base.src.bo,
				   NULL))
			goto cleanup_src;
	}

	gen5_align_vertex(sna, &tmp->base);
	gen5_bind_surfaces(sna, &tmp->base);
	return true;

cleanup_src:
	if (tmp->base.src.bo)
		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
cleanup_dst:
	if (tmp->base.redirect.real_bo)
		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
	return false;
}
||
1820 | #endif |
||
1821 | |||
1822 | |||
1823 | |||
1824 | static bool |
||
1825 | gen5_render_copy_boxes(struct sna *sna, uint8_t alu, |
||
1826 | PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy, |
||
1827 | PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy, |
||
1828 | const BoxRec *box, int n, unsigned flags) |
||
1829 | { |
||
1830 | struct sna_composite_op tmp; |
||
1831 | |||
1832 | DBG(("%s alu=%d, src=%ld:handle=%d, dst=%ld:handle=%d boxes=%d x [((%d, %d), (%d, %d))...], flags=%x\n", |
||
1833 | __FUNCTION__, alu, |
||
1834 | src->drawable.serialNumber, src_bo->handle, |
||
1835 | dst->drawable.serialNumber, dst_bo->handle, |
||
1836 | n, box->x1, box->y1, box->x2, box->y2, |
||
1837 | flags)); |
||
1838 | |||
1839 | if (sna_blt_compare_depth(&src->drawable, &dst->drawable) && |
||
1840 | sna_blt_copy_boxes(sna, alu, |
||
1841 | src_bo, src_dx, src_dy, |
||
1842 | dst_bo, dst_dx, dst_dy, |
||
1843 | dst->drawable.bitsPerPixel, |
||
1844 | box, n)) |
||
1845 | return true; |
||
1846 | |||
1847 | if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo) { |
||
1848 | fallback_blt: |
||
1849 | if (!sna_blt_compare_depth(&src->drawable, &dst->drawable)) |
||
1850 | return false; |
||
1851 | |||
1852 | return sna_blt_copy_boxes_fallback(sna, alu, |
||
1853 | src, src_bo, src_dx, src_dy, |
||
1854 | dst, dst_bo, dst_dx, dst_dy, |
||
1855 | box, n); |
||
1856 | } |
||
1857 | |||
1858 | memset(&tmp, 0, sizeof(tmp)); |
||
1859 | |||
1860 | if (dst->drawable.depth == src->drawable.depth) { |
||
1861 | tmp.dst.format = sna_render_format_for_depth(dst->drawable.depth); |
||
1862 | tmp.src.pict_format = tmp.dst.format; |
||
1863 | } else { |
||
1864 | tmp.dst.format = sna_format_for_depth(dst->drawable.depth); |
||
1865 | tmp.src.pict_format = sna_format_for_depth(src->drawable.depth); |
||
1866 | } |
||
1867 | if (!gen5_check_format(tmp.src.pict_format)) { |
||
1868 | DBG(("%s: unsupported source format, %x, use BLT\n", |
||
1869 | __FUNCTION__, tmp.src.pict_format)); |
||
1870 | goto fallback_blt; |
||
1871 | } |
||
1872 | |||
1873 | DBG(("%s (%d, %d)->(%d, %d) x %d\n", |
||
1874 | __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n)); |
||
1875 | |||
1876 | tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear; |
||
1877 | |||
1878 | tmp.dst.pixmap = dst; |
||
1879 | tmp.dst.width = dst->drawable.width; |
||
1880 | tmp.dst.height = dst->drawable.height; |
||
1881 | tmp.dst.x = tmp.dst.y = 0; |
||
1882 | tmp.dst.bo = dst_bo; |
||
1883 | tmp.damage = NULL; |
||
1884 | |||
1885 | sna_render_composite_redirect_init(&tmp); |
||
1886 | if (too_large(tmp.dst.width, tmp.dst.height)) { |
||
1887 | BoxRec extents = box[0]; |
||
1888 | int i; |
||
1889 | |||
1890 | for (i = 1; i < n; i++) { |
||
1891 | if (box[i].x1 < extents.x1) |
||
1892 | extents.x1 = box[i].x1; |
||
1893 | if (box[i].y1 < extents.y1) |
||
1894 | extents.y1 = box[i].y1; |
||
1895 | |||
1896 | if (box[i].x2 > extents.x2) |
||
1897 | extents.x2 = box[i].x2; |
||
1898 | if (box[i].y2 > extents.y2) |
||
1899 | extents.y2 = box[i].y2; |
||
1900 | } |
||
1901 | if (!sna_render_composite_redirect(sna, &tmp, |
||
1902 | extents.x1 + dst_dx, |
||
1903 | extents.y1 + dst_dy, |
||
1904 | extents.x2 - extents.x1, |
||
1905 | extents.y2 - extents.y1, |
||
1906 | n > 1)) |
||
1907 | goto fallback_tiled; |
||
1908 | } |
||
1909 | |||
1910 | tmp.src.filter = SAMPLER_FILTER_NEAREST; |
||
1911 | tmp.src.repeat = SAMPLER_EXTEND_NONE; |
||
1912 | tmp.src.card_format = gen5_get_card_format(tmp.src.pict_format); |
||
1913 | if (too_large(src->drawable.width, src->drawable.height)) { |
||
1914 | BoxRec extents = box[0]; |
||
1915 | int i; |
||
1916 | |||
1917 | for (i = 1; i < n; i++) { |
||
1918 | if (box[i].x1 < extents.x1) |
||
1919 | extents.x1 = box[i].x1; |
||
1920 | if (box[i].y1 < extents.y1) |
||
1921 | extents.y1 = box[i].y1; |
||
1922 | |||
1923 | if (box[i].x2 > extents.x2) |
||
1924 | extents.x2 = box[i].x2; |
||
1925 | if (box[i].y2 > extents.y2) |
||
1926 | extents.y2 = box[i].y2; |
||
1927 | } |
||
1928 | |||
1929 | if (!sna_render_pixmap_partial(sna, src, src_bo, &tmp.src, |
||
1930 | extents.x1 + src_dx, |
||
1931 | extents.y1 + src_dy, |
||
1932 | extents.x2 - extents.x1, |
||
1933 | extents.y2 - extents.y1)) |
||
1934 | goto fallback_tiled_dst; |
||
1935 | } else { |
||
1936 | tmp.src.bo = kgem_bo_reference(src_bo); |
||
1937 | tmp.src.width = src->drawable.width; |
||
1938 | tmp.src.height = src->drawable.height; |
||
1939 | tmp.src.offset[0] = tmp.src.offset[1] = 0; |
||
1940 | tmp.src.scale[0] = 1.f/src->drawable.width; |
||
1941 | tmp.src.scale[1] = 1.f/src->drawable.height; |
||
1942 | } |
||
1943 | |||
1944 | tmp.is_affine = true; |
||
1945 | tmp.floats_per_vertex = 3; |
||
1946 | tmp.floats_per_rect = 9; |
||
1947 | tmp.u.gen5.wm_kernel = WM_KERNEL; |
||
1948 | tmp.u.gen5.ve_id = 2; |
||
1949 | |||
1950 | if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { |
||
1951 | kgem_submit(&sna->kgem); |
||
1952 | if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) { |
||
1953 | DBG(("%s: aperture check failed\n", __FUNCTION__)); |
||
4501 | Serge | 1954 | kgem_bo_destroy(&sna->kgem, tmp.src.bo); |
1955 | if (tmp.redirect.real_bo) |
||
1956 | kgem_bo_destroy(&sna->kgem, tmp.dst.bo); |
||
1957 | goto fallback_blt; |
||
4304 | Serge | 1958 | } |
1959 | } |
||
1960 | |||
1961 | dst_dx += tmp.dst.x; |
||
1962 | dst_dy += tmp.dst.y; |
||
1963 | tmp.dst.x = tmp.dst.y = 0; |
||
1964 | |||
1965 | src_dx += tmp.src.offset[0]; |
||
1966 | src_dy += tmp.src.offset[1]; |
||
1967 | |||
4501 | Serge | 1968 | gen5_align_vertex(sna, &tmp); |
4304 | Serge | 1969 | gen5_copy_bind_surfaces(sna, &tmp); |
1970 | |||
1971 | do { |
||
1972 | int n_this_time; |
||
1973 | |||
1974 | n_this_time = gen5_get_rectangles(sna, &tmp, n, |
||
1975 | gen5_copy_bind_surfaces); |
||
1976 | n -= n_this_time; |
||
1977 | |||
1978 | do { |
||
1979 | DBG((" (%d, %d) -> (%d, %d) + (%d, %d)\n", |
||
1980 | box->x1 + src_dx, box->y1 + src_dy, |
||
1981 | box->x1 + dst_dx, box->y1 + dst_dy, |
||
1982 | box->x2 - box->x1, box->y2 - box->y1)); |
||
1983 | OUT_VERTEX(box->x2 + dst_dx, box->y2 + dst_dy); |
||
1984 | OUT_VERTEX_F((box->x2 + src_dx) * tmp.src.scale[0]); |
||
1985 | OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]); |
||
1986 | |||
1987 | OUT_VERTEX(box->x1 + dst_dx, box->y2 + dst_dy); |
||
1988 | OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]); |
||
1989 | OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]); |
||
1990 | |||
1991 | OUT_VERTEX(box->x1 + dst_dx, box->y1 + dst_dy); |
||
1992 | OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]); |
||
1993 | OUT_VERTEX_F((box->y1 + src_dy) * tmp.src.scale[1]); |
||
1994 | |||
1995 | box++; |
||
1996 | } while (--n_this_time); |
||
1997 | } while (n); |
||
1998 | |||
1999 | gen4_vertex_flush(sna); |
||
2000 | sna_render_composite_redirect_done(sna, &tmp); |
||
2001 | kgem_bo_destroy(&sna->kgem, tmp.src.bo); |
||
2002 | return true; |
||
2003 | |||
2004 | fallback_tiled_dst: |
||
2005 | if (tmp.redirect.real_bo) |
||
2006 | kgem_bo_destroy(&sna->kgem, tmp.dst.bo); |
||
2007 | fallback_tiled: |
||
2008 | if (sna_blt_compare_depth(&src->drawable, &dst->drawable) && |
||
2009 | sna_blt_copy_boxes(sna, alu, |
||
2010 | src_bo, src_dx, src_dy, |
||
2011 | dst_bo, dst_dx, dst_dy, |
||
2012 | dst->drawable.bitsPerPixel, |
||
2013 | box, n)) |
||
2014 | return true; |
||
2015 | |||
2016 | DBG(("%s: tiled fallback\n", __FUNCTION__)); |
||
2017 | return sna_tiling_copy_boxes(sna, alu, |
||
2018 | src, src_bo, src_dx, src_dy, |
||
2019 | dst, dst_bo, dst_dx, dst_dy, |
||
2020 | box, n); |
||
2021 | } |
||
2022 | |||
2023 | #endif |
||
/* Hook invoked when the batch switches rings (e.g. BLT -> RENDER).
 * Applies the Ironlake WaNonPipelinedStateCommandFlush workaround and
 * opportunistically submits the batch if the GPU has gone idle. */
static void
gen5_render_context_switch(struct kgem *kgem,
			   int new_mode)
{
	/* Nothing queued: no workaround or flush needed. */
	if (!kgem->nbatch)
		return;

	/* WaNonPipelinedStateCommandFlush
	 *
	 * Ironlake has a limitation that a 3D or Media command can't
	 * be the first command after a BLT, unless it's
	 * non-pipelined.
	 *
	 * We do this by ensuring that the non-pipelined drawrect
	 * is always emitted first following a switch from BLT.
	 */
	if (kgem->mode == KGEM_BLT) {
		struct sna *sna = to_sna_from_kgem(kgem);
		DBG(("%s: forcing drawrect on next state emission\n",
		     __FUNCTION__));
		/* Invalidate the cached drawrect so the next state
		 * emission re-emits it (non-pipelined). */
		sna->render_state.gen5.drawrect_limit = -1;
	}

	if (kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
		_kgem_submit(kgem);
	}
}
||
2052 | |||
2053 | static void gen5_render_reset(struct sna *sna) |
||
2054 | { |
||
2055 | sna->render_state.gen5.needs_invariant = true; |
||
2056 | sna->render_state.gen5.ve_id = -1; |
||
2057 | sna->render_state.gen5.last_primitive = -1; |
||
2058 | sna->render_state.gen5.last_pipelined_pointers = 0; |
||
2059 | |||
2060 | sna->render_state.gen5.drawrect_offset = -1; |
||
2061 | sna->render_state.gen5.drawrect_limit = -1; |
||
2062 | sna->render_state.gen5.surface_table = -1; |
||
2063 | |||
4501 | Serge | 2064 | if (sna->render.vbo && !kgem_bo_can_map(&sna->kgem, sna->render.vbo)) { |
4304 | Serge | 2065 | DBG(("%s: discarding unmappable vbo\n", __FUNCTION__)); |
2066 | discard_vbo(sna); |
||
2067 | } |
||
2068 | |||
2069 | sna->render.vertex_offset = 0; |
||
2070 | sna->render.nvertex_reloc = 0; |
||
2071 | sna->render.vb_id = 0; |
||
2072 | } |
||
2073 | |||
/* Release the static-stream bo holding all the precompiled gen5 unit
 * state and kernels (counterpart to gen5_render_setup). */
static void gen5_render_fini(struct sna *sna)
{
	kgem_bo_destroy(&sna->kgem, sna->render_state.gen5.general_bo);
}
||
2078 | |||
/* Allocate a VS unit state in the static stream configured as a
 * passthrough (VS disabled) and return its stream offset. */
static uint32_t gen5_create_vs_unit_state(struct sna_static_stream *stream)
{
	struct gen5_vs_unit_state *vs = sna_static_stream_map(stream, sizeof(*vs), 32);

	/* Set up the vertex shader to be disabled (passthrough) */
	vs->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
	vs->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
	vs->vs6.vs_enable = 0;
	vs->vs6.vert_cache_disable = 1;

	return sna_static_stream_offsetof(stream, vs);
}
||
2091 | |||
/* Allocate and fill an SF unit state in the static stream for the given
 * strips-and-fans kernel; returns the stream offset of the state. */
static uint32_t gen5_create_sf_state(struct sna_static_stream *stream,
				     uint32_t kernel)
{
	struct gen5_sf_unit_state *sf_state;

	sf_state = sna_static_stream_map(stream, sizeof(*sf_state), 32);

	sf_state->thread0.grf_reg_count = GEN5_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
	/* Kernel start pointers are in units of 64 bytes. */
	sf_state->thread0.kernel_start_pointer = kernel >> 6;

	sf_state->thread3.const_urb_entry_read_length = 0;	/* no const URBs */
	sf_state->thread3.const_urb_entry_read_offset = 0;	/* no const URBs */
	sf_state->thread3.urb_entry_read_length = 1;	/* 1 URB per vertex */
	/* don't smash vertex header, read start from dw8 */
	sf_state->thread3.urb_entry_read_offset = 1;
	sf_state->thread3.dispatch_grf_start_reg = 3;
	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
	sf_state->sf5.viewport_transform = false;	/* skip viewport */
	sf_state->sf6.cull_mode = GEN5_CULLMODE_NONE;
	sf_state->sf6.scissor = 0;
	sf_state->sf7.trifan_pv = 2;
	/* Half-pixel sample offsets (0x8 = 0.5 in U0.4). */
	sf_state->sf6.dest_org_vbias = 0x8;
	sf_state->sf6.dest_org_hbias = 0x8;

	return sna_static_stream_offsetof(stream, sf_state);
}
||
2120 | |||
2121 | static uint32_t gen5_create_sampler_state(struct sna_static_stream *stream, |
||
2122 | sampler_filter_t src_filter, |
||
2123 | sampler_extend_t src_extend, |
||
2124 | sampler_filter_t mask_filter, |
||
2125 | sampler_extend_t mask_extend) |
||
2126 | { |
||
2127 | struct gen5_sampler_state *sampler_state; |
||
2128 | |||
2129 | sampler_state = sna_static_stream_map(stream, |
||
2130 | sizeof(struct gen5_sampler_state) * 2, |
||
2131 | 32); |
||
2132 | sampler_state_init(&sampler_state[0], src_filter, src_extend); |
||
2133 | sampler_state_init(&sampler_state[1], mask_filter, mask_extend); |
||
2134 | |||
2135 | return sna_static_stream_offsetof(stream, sampler_state); |
||
2136 | } |
||
2137 | |||
/* Fill in a WM (pixel shader) unit state for the given kernel and
 * sampler-state offset.  has_mask selects the URB read length for one
 * vs two texture coordinate sets. */
static void gen5_init_wm_state(struct gen5_wm_unit_state *state,
			       bool has_mask,
			       uint32_t kernel,
			       uint32_t sampler)
{
	state->thread0.grf_reg_count = GEN5_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
	/* Kernel start pointers are in units of 64 bytes. */
	state->thread0.kernel_start_pointer = kernel >> 6;

	state->thread1.single_program_flow = 0;

	/* scratch space is not used in our kernel */
	state->thread2.scratch_space_base_pointer = 0;
	state->thread2.per_thread_scratch_space = 0;

	state->thread3.const_urb_entry_read_length = 0;
	state->thread3.const_urb_entry_read_offset = 0;

	state->thread3.urb_entry_read_offset = 0;
	/* wm kernel use urb from 3, see wm_program in compiler module */
	state->thread3.dispatch_grf_start_reg = 3;	/* must match kernel */

	state->wm4.sampler_count = 0;	/* hardware requirement */

	/* Sampler state pointers are in units of 32 bytes. */
	state->wm4.sampler_state_pointer = sampler >> 5;
	state->wm5.max_threads = PS_MAX_THREADS - 1;
	state->wm5.transposed_urb_read = 0;
	state->wm5.thread_dispatch_enable = 1;
	/* just use 16-pixel dispatch (4 subspans), don't need to change kernel
	 * start point
	 */
	state->wm5.enable_16_pix = 1;
	state->wm5.enable_8_pix = 0;
	state->wm5.early_depth_test = 1;

	/* Each pair of attributes (src/mask coords) is two URB entries */
	if (has_mask) {
		state->thread1.binding_table_entry_count = 3;	/* 2 tex and fb */
		state->thread3.urb_entry_read_length = 4;
	} else {
		state->thread1.binding_table_entry_count = 2;	/* 1 tex and fb */
		state->thread3.urb_entry_read_length = 2;
	}

	/* binding table entry count is only used for prefetching,
	 * and it has to be set 0 for Ironlake
	 */
	/* Deliberately overrides the values assigned just above. */
	state->thread1.binding_table_entry_count = 0;
}
||
2186 | |||
2187 | static uint32_t gen5_create_cc_unit_state(struct sna_static_stream *stream) |
||
2188 | { |
||
2189 | uint8_t *ptr, *base; |
||
2190 | int i, j; |
||
2191 | |||
2192 | base = ptr = |
||
2193 | sna_static_stream_map(stream, |
||
2194 | GEN5_BLENDFACTOR_COUNT*GEN5_BLENDFACTOR_COUNT*64, |
||
2195 | 64); |
||
2196 | |||
2197 | for (i = 0; i < GEN5_BLENDFACTOR_COUNT; i++) { |
||
2198 | for (j = 0; j < GEN5_BLENDFACTOR_COUNT; j++) { |
||
2199 | struct gen5_cc_unit_state *state = |
||
2200 | (struct gen5_cc_unit_state *)ptr; |
||
2201 | |||
2202 | state->cc3.blend_enable = |
||
2203 | !(j == GEN5_BLENDFACTOR_ZERO && i == GEN5_BLENDFACTOR_ONE); |
||
2204 | |||
2205 | state->cc5.logicop_func = 0xc; /* COPY */ |
||
2206 | state->cc5.ia_blend_function = GEN5_BLENDFUNCTION_ADD; |
||
2207 | |||
2208 | /* Fill in alpha blend factors same as color, for the future. */ |
||
2209 | state->cc5.ia_src_blend_factor = i; |
||
2210 | state->cc5.ia_dest_blend_factor = j; |
||
2211 | |||
2212 | state->cc6.blend_function = GEN5_BLENDFUNCTION_ADD; |
||
2213 | state->cc6.clamp_post_alpha_blend = 1; |
||
2214 | state->cc6.clamp_pre_alpha_blend = 1; |
||
2215 | state->cc6.src_blend_factor = i; |
||
2216 | state->cc6.dest_blend_factor = j; |
||
2217 | |||
2218 | ptr += 64; |
||
2219 | } |
||
2220 | } |
||
2221 | |||
2222 | return sna_static_stream_offsetof(stream, base); |
||
2223 | } |
||
2224 | |||
/* Precompile all static hardware state into one bo: SF and WM kernels,
 * VS/SF unit states, the WM unit-state grid (every kernel for every
 * src/mask filter+extend combination) and the blend-state table.
 * Returns false if the backing bo could not be created. */
static bool gen5_render_setup(struct sna *sna)
{
	struct gen5_render_state *state = &sna->render_state.gen5;
	struct sna_static_stream general;
	struct gen5_wm_unit_state_padded *wm_state;
	uint32_t sf[2], wm[KERNEL_COUNT];
	int i, j, k, l, m;

	sna_static_stream_init(&general);

	/* Zero pad the start. If you see an offset of 0x0 in the batchbuffer
	 * dumps, you know it points to zero.
	 */
	null_create(&general);

	/* Set up the two SF states (one for blending with a mask, one without) */
	sf[0] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__nomask);
	sf[1] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__mask);

	/* Pixel kernels: either prebuilt binaries or compiled on the fly. */
	for (m = 0; m < KERNEL_COUNT; m++) {
		if (wm_kernels[m].size) {
			wm[m] = sna_static_stream_add(&general,
						      wm_kernels[m].data,
						      wm_kernels[m].size,
						      64);
		} else {
			wm[m] = sna_static_stream_compile_wm(sna, &general,
							     wm_kernels[m].data,
							     16);
		}
		assert(wm[m]);
	}

	state->vs = gen5_create_vs_unit_state(&general);

	state->sf[0] = gen5_create_sf_state(&general, sf[0]);
	state->sf[1] = gen5_create_sf_state(&general, sf[1]);


	/* Set up the WM states: each filter/extend type for source and mask, per
	 * kernel.
	 */
	wm_state = sna_static_stream_map(&general,
					 sizeof(*wm_state) * KERNEL_COUNT *
					 FILTER_COUNT * EXTEND_COUNT *
					 FILTER_COUNT * EXTEND_COUNT,
					 64);
	state->wm = sna_static_stream_offsetof(&general, wm_state);
	/* i/j = src filter/extend, k/l = mask filter/extend, m = kernel;
	 * iteration order must match the lookup used at draw time. */
	for (i = 0; i < FILTER_COUNT; i++) {
		for (j = 0; j < EXTEND_COUNT; j++) {
			for (k = 0; k < FILTER_COUNT; k++) {
				for (l = 0; l < EXTEND_COUNT; l++) {
					uint32_t sampler_state;

					sampler_state =
						gen5_create_sampler_state(&general,
									  i, j,
									  k, l);

					for (m = 0; m < KERNEL_COUNT; m++) {
						gen5_init_wm_state(&wm_state->state,
								   wm_kernels[m].has_mask,
								   wm[m], sampler_state);
						wm_state++;
					}
				}
			}
		}
	}

	state->cc = gen5_create_cc_unit_state(&general);

	/* Upload everything into a single bo owned by the render state. */
	state->general_bo = sna_static_stream_fini(sna, &general);
	return state->general_bo != NULL;
}
||
2300 | |||
2301 | const char *gen5_render_init(struct sna *sna, const char *backend) |
||
2302 | { |
||
2303 | if (!gen5_render_setup(sna)) |
||
2304 | return backend; |
||
2305 | |||
2306 | sna->kgem.context_switch = gen5_render_context_switch; |
||
4501 | Serge | 2307 | sna->kgem.retire = gen4_render_retire; |
2308 | sna->kgem.expire = gen4_render_expire; |
||
4304 | Serge | 2309 | |
2310 | #if 0 |
||
2311 | #if !NO_COMPOSITE |
||
2312 | sna->render.composite = gen5_render_composite; |
||
2313 | sna->render.prefer_gpu |= PREFER_GPU_RENDER; |
||
2314 | #endif |
||
2315 | #if !NO_COMPOSITE_SPANS |
||
2316 | sna->render.check_composite_spans = gen5_check_composite_spans; |
||
2317 | sna->render.composite_spans = gen5_render_composite_spans; |
||
4501 | Serge | 2318 | if (intel_get_device_id(sna->scrn) == 0x0044) |
4304 | Serge | 2319 | sna->render.prefer_gpu |= PREFER_GPU_SPANS; |
2320 | #endif |
||
2321 | sna->render.video = gen5_render_video; |
||
2322 | |||
2323 | sna->render.copy_boxes = gen5_render_copy_boxes; |
||
2324 | sna->render.copy = gen5_render_copy; |
||
2325 | |||
2326 | sna->render.fill_boxes = gen5_render_fill_boxes; |
||
2327 | sna->render.fill = gen5_render_fill; |
||
2328 | sna->render.fill_one = gen5_render_fill_one; |
||
2329 | #endif |
||
2330 | |||
2331 | sna->render.blit_tex = gen5_blit_tex; |
||
2332 | sna->render.caps = HW_BIT_BLIT | HW_TEX_BLIT; |
||
2333 | |||
4501 | Serge | 2334 | sna->render.flush = gen4_render_flush; |
4304 | Serge | 2335 | sna->render.reset = gen5_render_reset; |
2336 | sna->render.fini = gen5_render_fini; |
||
2337 | |||
2338 | sna->render.max_3d_size = MAX_3D_SIZE; |
||
2339 | sna->render.max_3d_pitch = 1 << 18; |
||
2340 | return "Ironlake (gen5)"; |
||
2341 | }; |
||
2342 | |||
/* Set up a masked texture blit (src modulated by an a8 mask) as a
 * composite operation in *tmp and emit the initial state.  Used by the
 * KolibriOS port as the generic textured-blit entry point.
 *
 * If scale is set, source texcoords are normalised by the requested
 * width/height instead of the source drawable's dimensions.
 */
static bool
gen5_blit_tex(struct sna *sna,
	      uint8_t op, bool scale,
	      PixmapPtr src, struct kgem_bo *src_bo,
	      PixmapPtr mask,struct kgem_bo *mask_bo,
	      PixmapPtr dst, struct kgem_bo *dst_bo,
	      int32_t src_x, int32_t src_y,
	      int32_t msk_x, int32_t msk_y,
	      int32_t dst_x, int32_t dst_y,
	      int32_t width, int32_t height,
	      struct sna_composite_op *tmp)
{
	DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
	     width, height, sna->kgem.mode));

	/* NOTE(review): op parameter is ignored; the blit is always
	 * performed as PictOpSrc — confirm this is intended. */
	tmp->op = PictOpSrc;

	tmp->dst.pixmap = dst;
	tmp->dst.bo = dst_bo;
	tmp->dst.width = dst->drawable.width;
	tmp->dst.height = dst->drawable.height;
	/* Formats are hard-coded to x8r8g8b8 regardless of the
	 * drawables' actual depth — assumes 32bpp surfaces. */
	tmp->dst.format = PICT_x8r8g8b8;


	tmp->src.repeat = RepeatNone;
	tmp->src.filter = PictFilterNearest;
	tmp->src.is_affine = true;

	tmp->src.bo = src_bo;
	tmp->src.pict_format = PICT_x8r8g8b8;
	tmp->src.card_format = gen5_get_card_format(tmp->src.pict_format);
	tmp->src.width = src->drawable.width;
	tmp->src.height = src->drawable.height;


	tmp->is_affine = tmp->src.is_affine;
	tmp->has_component_alpha = false;
	tmp->need_magic_ca_pass = false;

	tmp->mask.is_affine = true;
	tmp->mask.repeat = SAMPLER_EXTEND_NONE;
	tmp->mask.filter = SAMPLER_FILTER_NEAREST;
	tmp->mask.bo = mask_bo;
	/* NOTE(review): uses PIXMAN_a8 here while every other channel
	 * uses PICT_* codes — verify gen5_get_card_format accepts it. */
	tmp->mask.pict_format = PIXMAN_a8;
	tmp->mask.card_format = gen5_get_card_format(tmp->mask.pict_format);
	tmp->mask.width = mask->drawable.width;
	tmp->mask.height = mask->drawable.height;

	/* Normalised source texcoords: either by the requested blit
	 * size (scaled blit) or by the source surface size (1:1). */
	if( scale )
	{
		tmp->src.scale[0] = 1.f/width;
		tmp->src.scale[1] = 1.f/height;
	}
	else
	{
		tmp->src.scale[0] = 1.f/src->drawable.width;
		tmp->src.scale[1] = 1.f/src->drawable.height;
	}

	tmp->mask.scale[0] = 1.f/mask->drawable.width;
	tmp->mask.scale[1] = 1.f/mask->drawable.height;


	tmp->u.gen5.wm_kernel = WM_KERNEL_MASK;

//   gen5_choose_composite_kernel(tmp->op,
//                    tmp->mask.bo != NULL,
//                    tmp->has_component_alpha,
//                    tmp->is_affine);
	tmp->u.gen5.ve_id = gen4_choose_composite_emitter(sna, tmp);

	tmp->blt   = gen5_render_composite_blt;
	tmp->done  = gen5_render_composite_done;

	/* NOTE(review): unlike the other paths, the aperture check is
	 * not repeated after kgem_submit() and failure is not reported
	 * to the caller — confirm the bos are guaranteed to fit in a
	 * fresh batch. */
	if (!kgem_check_bo(&sna->kgem,
			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) {
		kgem_submit(&sna->kgem);
	}

	gen5_align_vertex(sna, tmp);
	gen5_bind_surfaces(sna, tmp);

	return true;
}