/*
 * Copyright © 2006,2008,2011 Intel Corporation
 * Copyright © 2007 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Wang Zhenyu
 *    Eric Anholt
 *    Carl Worth
 *    Keith Packard
 *    Chris Wilson
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "sna.h"
#include "sna_reg.h"
#include "sna_render.h"
#include "sna_render_inline.h"
//#include "sna_video.h"

#include "brw/brw.h"
#include "gen5_render.h"
#include "gen4_source.h"
#include "gen4_vertex.h"

#define NO_COMPOSITE 0
#define NO_COMPOSITE_SPANS 0

#define PREFER_BLT_FILL 1

#define DBG_NO_STATE_CACHE 0
#define DBG_NO_SURFACE_CACHE 0

#define MAX_3D_SIZE 8192

#define GEN5_GRF_BLOCKS(nreg)    ((nreg + 15) / 16 - 1)
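/* Added illustration (not in the original source; it follows purely from the
 * macro arithmetic above): GEN5_GRF_BLOCKS(16) = (16 + 15) / 16 - 1 = 0 and
 * GEN5_GRF_BLOCKS(32) = (32 + 15) / 16 - 1 = 1, i.e. the macro maps 1-16
 * registers to 0, 17-32 registers to 1, and so on.
 */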

/* Set up a default static partitioning of the URB, which is supposed to
 * allow anything we would want to do, at potentially lower performance.
 */
#define URB_CS_ENTRY_SIZE     1
#define URB_CS_ENTRIES	      0

#define URB_VS_ENTRY_SIZE     1
#define URB_VS_ENTRIES	      256 /* minimum of 8 */

#define URB_GS_ENTRY_SIZE     0
#define URB_GS_ENTRIES	      0

#define URB_CLIP_ENTRY_SIZE   0
#define URB_CLIP_ENTRIES      0

#define URB_SF_ENTRY_SIZE     2
#define URB_SF_ENTRIES	      64
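/* Added illustration (not in the original source; derived only from the
 * values above as consumed by gen5_emit_urb() below): the resulting URB
 * fences are VS = 256*1 = 256, GS = 256, CLIP = 256, SF = 256 + 64*2 = 384
 * and CS = 384, so only the VS and SF sections receive any URB space in
 * this static partitioning.
 */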

/*
 * this program computes dA/dx and dA/dy for the texture coordinates along
 * with the base texture coordinate. It was extracted from the Mesa driver
 */

#define SF_KERNEL_NUM_GRF  16
#define SF_MAX_THREADS	   48

#define PS_KERNEL_NUM_GRF   32
#define PS_MAX_THREADS	    72

static const uint32_t ps_kernel_packed_static[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_argb.g5b"
#include "exa_wm_yuv_rgb.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_planar_static[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_planar.g5b"
#include "exa_wm_yuv_rgb.g5b"
#include "exa_wm_write.g5b"
};

#define NOKERNEL(kernel_enum, func, masked) \
    [kernel_enum] = {func, 0, masked}
#define KERNEL(kernel_enum, kernel, masked) \
    [kernel_enum] = {&kernel, sizeof(kernel), masked}
static const struct wm_kernel_info {
	const void *data;
	unsigned int size;
	bool has_mask;
} wm_kernels[] = {
	NOKERNEL(WM_KERNEL, brw_wm_kernel__affine, false),
	NOKERNEL(WM_KERNEL_P, brw_wm_kernel__projective, false),

	NOKERNEL(WM_KERNEL_MASK, brw_wm_kernel__affine_mask, true),
	NOKERNEL(WM_KERNEL_MASK_P, brw_wm_kernel__projective_mask, true),

	NOKERNEL(WM_KERNEL_MASKCA, brw_wm_kernel__affine_mask_ca, true),
	NOKERNEL(WM_KERNEL_MASKCA_P, brw_wm_kernel__projective_mask_ca, true),

	NOKERNEL(WM_KERNEL_MASKSA, brw_wm_kernel__affine_mask_sa, true),
	NOKERNEL(WM_KERNEL_MASKSA_P, brw_wm_kernel__projective_mask_sa, true),

	NOKERNEL(WM_KERNEL_OPACITY, brw_wm_kernel__affine_opacity, true),
	NOKERNEL(WM_KERNEL_OPACITY_P, brw_wm_kernel__projective_opacity, true),

	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
};
#undef KERNEL

static const struct blendinfo {
	bool src_alpha;
	uint32_t src_blend;
	uint32_t dst_blend;
} gen5_blend_op[] = {
	/* Clear */	{0, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_ZERO},
	/* Src */	{0, GEN5_BLENDFACTOR_ONE, GEN5_BLENDFACTOR_ZERO},
	/* Dst */	{0, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_ONE},
	/* Over */	{1, GEN5_BLENDFACTOR_ONE, GEN5_BLENDFACTOR_INV_SRC_ALPHA},
	/* OverReverse */ {0, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_ONE},
	/* In */	{0, GEN5_BLENDFACTOR_DST_ALPHA, GEN5_BLENDFACTOR_ZERO},
	/* InReverse */	{1, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_SRC_ALPHA},
	/* Out */	{0, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_ZERO},
	/* OutReverse */ {1, GEN5_BLENDFACTOR_ZERO, GEN5_BLENDFACTOR_INV_SRC_ALPHA},
	/* Atop */	{1, GEN5_BLENDFACTOR_DST_ALPHA, GEN5_BLENDFACTOR_INV_SRC_ALPHA},
	/* AtopReverse */ {1, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_SRC_ALPHA},
	/* Xor */	{1, GEN5_BLENDFACTOR_INV_DST_ALPHA, GEN5_BLENDFACTOR_INV_SRC_ALPHA},
	/* Add */	{0, GEN5_BLENDFACTOR_ONE, GEN5_BLENDFACTOR_ONE},
};

/**
 * Highest-valued BLENDFACTOR used in gen5_blend_op.
 *
 * This leaves out GEN5_BLENDFACTOR_INV_DST_COLOR,
 * GEN5_BLENDFACTOR_INV_CONST_{COLOR,ALPHA},
 * GEN5_BLENDFACTOR_INV_SRC1_{COLOR,ALPHA}
 */
#define GEN5_BLENDFACTOR_COUNT (GEN5_BLENDFACTOR_INV_DST_ALPHA + 1)

#define BLEND_OFFSET(s, d) \
	(((s) * GEN5_BLENDFACTOR_COUNT + (d)) * 64)
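/* Added example (not in the original source; follows from gen5_blend_op[]
 * and the macro above): PictOpOver uses (GEN5_BLENDFACTOR_ONE,
 * GEN5_BLENDFACTOR_INV_SRC_ALPHA), so its pre-baked 64-byte colour-calc
 * state is looked up at BLEND_OFFSET(GEN5_BLENDFACTOR_ONE,
 * GEN5_BLENDFACTOR_INV_SRC_ALPHA) relative to render_state.gen5.cc.
 */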
165
 
166
#define SAMPLER_OFFSET(sf, se, mf, me, k) \
167
	((((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * KERNEL_COUNT + (k)) * 64)
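/* Added note (not in the original source): SAMPLER_OFFSET() linearises a
 * five-dimensional table indexed as [src filter][src extend][mask filter]
 * [mask extend][wm kernel], one 64-byte block per combination;
 * gen5_emit_pipelined_pointers() adds the result to render_state.gen5.wm
 * to select the matching pre-generated WM state.
 */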

static bool
gen5_emit_pipelined_pointers(struct sna *sna,
			     const struct sna_composite_op *op,
			     int blend, int kernel);

#define OUT_BATCH(v) batch_emit(sna, v)
#define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
#define OUT_VERTEX_F(v) vertex_emit(sna, v)

static inline bool too_large(int width, int height)
{
	return width > MAX_3D_SIZE || height > MAX_3D_SIZE;
}

static int
gen5_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
{
	int base;

	if (has_mask) {
		if (is_ca) {
			if (gen5_blend_op[op].src_alpha)
				base = WM_KERNEL_MASKSA;
			else
				base = WM_KERNEL_MASKCA;
		} else
			base = WM_KERNEL_MASK;
	} else
		base = WM_KERNEL;

	return base + !is_affine;
}
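/* Added example (not in the original source; it relies on each projective
 * *_P kernel enum immediately following its affine counterpart, as laid out
 * in wm_kernels[] above): a component-alpha mask whose blend factor reads
 * source alpha, combined with a non-affine transform, selects
 * WM_KERNEL_MASKSA + 1, i.e. WM_KERNEL_MASKSA_P.
 */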

static bool gen5_magic_ca_pass(struct sna *sna,
			       const struct sna_composite_op *op)
{
	struct gen5_render_state *state = &sna->render_state.gen5;

	if (!op->need_magic_ca_pass)
		return false;

	assert(sna->render.vertex_index > sna->render.vertex_start);

	DBG(("%s: CA fixup\n", __FUNCTION__));
	assert(op->mask.bo != NULL);
	assert(op->has_component_alpha);

	gen5_emit_pipelined_pointers
		(sna, op, PictOpAdd,
		 gen5_choose_composite_kernel(PictOpAdd,
					      true, true, op->is_affine));

	OUT_BATCH(GEN5_3DPRIMITIVE |
		  GEN5_3DPRIMITIVE_VERTEX_SEQUENTIAL |
		  (_3DPRIM_RECTLIST << GEN5_3DPRIMITIVE_TOPOLOGY_SHIFT) |
		  (0 << 9) |
		  4);
	OUT_BATCH(sna->render.vertex_index - sna->render.vertex_start);
	OUT_BATCH(sna->render.vertex_start);
	OUT_BATCH(1);	/* single instance */
	OUT_BATCH(0);	/* start instance location */
	OUT_BATCH(0);	/* index buffer offset, ignored */

	state->last_primitive = sna->kgem.nbatch;
	return true;
}

static uint32_t gen5_get_blend(int op,
			       bool has_component_alpha,
			       uint32_t dst_format)
{
	uint32_t src, dst;

	src = GEN5_BLENDFACTOR_ONE; //gen5_blend_op[op].src_blend;
	dst = GEN5_BLENDFACTOR_INV_SRC_ALPHA; //gen5_blend_op[op].dst_blend;
#if 0
	/* If there's no dst alpha channel, adjust the blend op so that we'll treat
	 * it as always 1.
	 */
	if (PICT_FORMAT_A(dst_format) == 0) {
		if (src == GEN5_BLENDFACTOR_DST_ALPHA)
			src = GEN5_BLENDFACTOR_ONE;
		else if (src == GEN5_BLENDFACTOR_INV_DST_ALPHA)
			src = GEN5_BLENDFACTOR_ZERO;
	}

	/* If the source alpha is being used, then we should only be in a
	 * case where the source blend factor is 0, and the source blend
	 * value is the mask channels multiplied by the source picture's alpha.
	 */
	if (has_component_alpha && gen5_blend_op[op].src_alpha) {
		if (dst == GEN5_BLENDFACTOR_SRC_ALPHA)
			dst = GEN5_BLENDFACTOR_SRC_COLOR;
		else if (dst == GEN5_BLENDFACTOR_INV_SRC_ALPHA)
			dst = GEN5_BLENDFACTOR_INV_SRC_COLOR;
	}
#endif

	DBG(("blend op=%d, dst=%x [A=%d] => src=%d, dst=%d => offset=%x\n",
	     op, dst_format, PICT_FORMAT_A(dst_format),
	     src, dst, BLEND_OFFSET(src, dst)));
	return BLEND_OFFSET(src, dst);
}

static uint32_t gen5_get_card_format(PictFormat format)
{
	switch (format) {
	default:
		return -1;
	case PICT_a8r8g8b8:
		return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
	case PICT_x8r8g8b8:
		return GEN5_SURFACEFORMAT_B8G8R8X8_UNORM;
	case PICT_a8b8g8r8:
		return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM;
	case PICT_x8b8g8r8:
		return GEN5_SURFACEFORMAT_R8G8B8X8_UNORM;
	case PICT_a2r10g10b10:
		return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
	case PICT_x2r10g10b10:
		return GEN5_SURFACEFORMAT_B10G10R10X2_UNORM;
	case PICT_r8g8b8:
		return GEN5_SURFACEFORMAT_R8G8B8_UNORM;
	case PICT_r5g6b5:
		return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
	case PICT_a1r5g5b5:
		return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
	case PICT_a8:
		return GEN5_SURFACEFORMAT_A8_UNORM;
	case PICT_a4r4g4b4:
		return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM;
	}
}

static uint32_t gen5_get_dest_format(PictFormat format)
{
	switch (format) {
	default:
		return -1;
	case PICT_a8r8g8b8:
	case PICT_x8r8g8b8:
		return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
	case PICT_a8b8g8r8:
	case PICT_x8b8g8r8:
		return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM;
	case PICT_a2r10g10b10:
	case PICT_x2r10g10b10:
		return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
	case PICT_r5g6b5:
		return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
	case PICT_x1r5g5b5:
	case PICT_a1r5g5b5:
		return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
	case PICT_a8:
		return GEN5_SURFACEFORMAT_A8_UNORM;
	case PICT_a4r4g4b4:
	case PICT_x4r4g4b4:
		return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM;
	}
}
typedef struct gen5_surface_state_padded {
	struct gen5_surface_state state;
	char pad[32 - sizeof(struct gen5_surface_state)];
} gen5_surface_state_padded;

static void null_create(struct sna_static_stream *stream)
{
	/* A bunch of zeros useful for legacy border color and depth-stencil */
	sna_static_stream_map(stream, 64, 64);
}

static void
sampler_state_init(struct gen5_sampler_state *sampler_state,
		   sampler_filter_t filter,
		   sampler_extend_t extend)
{
	sampler_state->ss0.lod_preclamp = 1;	/* GL mode */

	/* We use the legacy mode to get the semantics specified by
	 * the Render extension. */
	sampler_state->ss0.border_color_mode = GEN5_BORDER_COLOR_MODE_LEGACY;

	switch (filter) {
	default:
	case SAMPLER_FILTER_NEAREST:
		sampler_state->ss0.min_filter = GEN5_MAPFILTER_NEAREST;
		sampler_state->ss0.mag_filter = GEN5_MAPFILTER_NEAREST;
		break;
	case SAMPLER_FILTER_BILINEAR:
		sampler_state->ss0.min_filter = GEN5_MAPFILTER_LINEAR;
		sampler_state->ss0.mag_filter = GEN5_MAPFILTER_LINEAR;
		break;
	}

	switch (extend) {
	default:
	case SAMPLER_EXTEND_NONE:
		sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_CLAMP_BORDER;
		sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_CLAMP_BORDER;
		sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_CLAMP_BORDER;
		break;
	case SAMPLER_EXTEND_REPEAT:
		sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_WRAP;
		sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_WRAP;
		sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_WRAP;
		break;
	case SAMPLER_EXTEND_PAD:
		sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_CLAMP;
		sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_CLAMP;
		sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_CLAMP;
		break;
	case SAMPLER_EXTEND_REFLECT:
		sampler_state->ss1.r_wrap_mode = GEN5_TEXCOORDMODE_MIRROR;
		sampler_state->ss1.s_wrap_mode = GEN5_TEXCOORDMODE_MIRROR;
		sampler_state->ss1.t_wrap_mode = GEN5_TEXCOORDMODE_MIRROR;
		break;
	}
}

static uint32_t
gen5_tiling_bits(uint32_t tiling)
{
	switch (tiling) {
	default: assert(0);
	case I915_TILING_NONE: return 0;
	case I915_TILING_X: return GEN5_SURFACE_TILED;
	case I915_TILING_Y: return GEN5_SURFACE_TILED | GEN5_SURFACE_TILED_Y;
	}
}

/**
 * Sets up the common fields for a surface state buffer for the given
 * picture in the given surface state buffer.
 */
static uint32_t
gen5_bind_bo(struct sna *sna,
	     struct kgem_bo *bo,
	     uint32_t width,
	     uint32_t height,
	     uint32_t format,
	     bool is_dst)
{
	uint32_t domains;
	uint16_t offset;
	uint32_t *ss;

	/* After the first bind, we manage the cache domains within the batch */
	if (!DBG_NO_SURFACE_CACHE) {
		offset = kgem_bo_get_binding(bo, format | is_dst << 31);
		if (offset) {
			if (is_dst)
				kgem_bo_mark_dirty(bo);
			return offset * sizeof(uint32_t);
		}
	}

	offset = sna->kgem.surface -=
		sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
	ss = sna->kgem.batch + offset;

	ss[0] = (GEN5_SURFACE_2D << GEN5_SURFACE_TYPE_SHIFT |
		 GEN5_SURFACE_BLEND_ENABLED |
		 format << GEN5_SURFACE_FORMAT_SHIFT);

	if (is_dst) {
		ss[0] |= GEN5_SURFACE_RC_READ_WRITE;
		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
	} else
		domains = I915_GEM_DOMAIN_SAMPLER << 16;
	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);

	ss[2] = ((width - 1)  << GEN5_SURFACE_WIDTH_SHIFT |
		 (height - 1) << GEN5_SURFACE_HEIGHT_SHIFT);
	ss[3] = (gen5_tiling_bits(bo->tiling) |
		 (bo->pitch - 1) << GEN5_SURFACE_PITCH_SHIFT);
	ss[4] = 0;
	ss[5] = 0;

	kgem_bo_set_binding(bo, format | is_dst << 31, offset);

	DBG(("[%x] bind bo(handle=%d, addr=%d), format=%d, width=%d, height=%d, pitch=%d, tiling=%d -> %s\n",
	     offset, bo->handle, ss[1],
	     format, width, height, bo->pitch, bo->tiling,
	     domains & 0xffff ? "render" : "sampler"));

	return offset * sizeof(uint32_t);
}

static void gen5_emit_vertex_buffer(struct sna *sna,
				    const struct sna_composite_op *op)
{
	int id = op->u.gen5.ve_id;

	assert((sna->render.vb_id & (1 << id)) == 0);

	OUT_BATCH(GEN5_3DSTATE_VERTEX_BUFFERS | 3);
	OUT_BATCH(id << VB0_BUFFER_INDEX_SHIFT | VB0_VERTEXDATA |
		  (4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT));
	assert(sna->render.nvertex_reloc < ARRAY_SIZE(sna->render.vertex_reloc));
	sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
	OUT_BATCH(0);
	OUT_BATCH(~0); /* max address: disabled */
	OUT_BATCH(0);

	sna->render.vb_id |= 1 << id;
}

static void gen5_emit_primitive(struct sna *sna)
{
	if (sna->kgem.nbatch == sna->render_state.gen5.last_primitive) {
		sna->render.vertex_offset = sna->kgem.nbatch - 5;
		return;
	}

	OUT_BATCH(GEN5_3DPRIMITIVE |
		  GEN5_3DPRIMITIVE_VERTEX_SEQUENTIAL |
		  (_3DPRIM_RECTLIST << GEN5_3DPRIMITIVE_TOPOLOGY_SHIFT) |
		  (0 << 9) |
		  4);
	sna->render.vertex_offset = sna->kgem.nbatch;
	OUT_BATCH(0);	/* vertex count, to be filled in later */
	OUT_BATCH(sna->render.vertex_index);
	OUT_BATCH(1);	/* single instance */
	OUT_BATCH(0);	/* start instance location */
	OUT_BATCH(0);	/* index buffer offset, ignored */
	sna->render.vertex_start = sna->render.vertex_index;

	sna->render_state.gen5.last_primitive = sna->kgem.nbatch;
}

static bool gen5_rectangle_begin(struct sna *sna,
				 const struct sna_composite_op *op)
{
	int id = op->u.gen5.ve_id;
	int ndwords;

	if (sna_vertex_wait__locked(&sna->render) && sna->render.vertex_offset)
		return true;

	ndwords = op->need_magic_ca_pass ? 20 : 6;
	if ((sna->render.vb_id & (1 << id)) == 0)
		ndwords += 5;

	if (!kgem_check_batch(&sna->kgem, ndwords))
		return false;

	if ((sna->render.vb_id & (1 << id)) == 0)
		gen5_emit_vertex_buffer(sna, op);
	if (sna->render.vertex_offset == 0)
		gen5_emit_primitive(sna);

	return true;
}

static int gen5_get_rectangles__flush(struct sna *sna,
				      const struct sna_composite_op *op)
{
	/* Prevent the new vbo from being discarded after lock contention */
	if (sna_vertex_wait__locked(&sna->render)) {
		int rem = vertex_space(sna);
		if (rem > op->floats_per_rect)
			return rem;
	}

	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 20 : 6))
		return 0;
	if (!kgem_check_reloc_and_exec(&sna->kgem, 2))
		return 0;

	if (sna->render.vertex_offset) {
		gen4_vertex_flush(sna);
		if (gen5_magic_ca_pass(sna, op))
			gen5_emit_pipelined_pointers(sna, op, op->op,
						     op->u.gen5.wm_kernel);
	}

	return gen4_vertex_finish(sna);
}

inline static int gen5_get_rectangles(struct sna *sna,
				      const struct sna_composite_op *op,
				      int want,
				      void (*emit_state)(struct sna *sna,
							 const struct sna_composite_op *op))
{
	int rem;

	assert(want);

start:
	rem = vertex_space(sna);
	if (unlikely(rem < op->floats_per_rect)) {
		DBG(("flushing vbo for %s: %d < %d\n",
		     __FUNCTION__, rem, op->floats_per_rect));
		rem = gen5_get_rectangles__flush(sna, op);
		if (unlikely (rem == 0))
			goto flush;
	}

	if (unlikely(sna->render.vertex_offset == 0)) {
		if (!gen5_rectangle_begin(sna, op))
			goto flush;
		else
			goto start;
	}

	assert(rem <= vertex_space(sna));
	assert(op->floats_per_rect <= rem);
	if (want > 1 && want * op->floats_per_rect > rem)
		want = rem / op->floats_per_rect;

	sna->render.vertex_index += 3*want;
	return want;

flush:
	if (sna->render.vertex_offset) {
		gen4_vertex_flush(sna);
		gen5_magic_ca_pass(sna, op);
	}
	sna_vertex_wait__locked(&sna->render);
	_kgem_submit(&sna->kgem);
	emit_state(sna, op);
	goto start;
}

static uint32_t *
gen5_composite_get_binding_table(struct sna *sna,
				 uint16_t *offset)
{
	sna->kgem.surface -=
		sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);

	DBG(("%s(%x)\n", __FUNCTION__, 4*sna->kgem.surface));

	/* Clear all surplus entries to zero in case of prefetch */
	*offset = sna->kgem.surface;
	return memset(sna->kgem.batch + sna->kgem.surface,
		      0, sizeof(struct gen5_surface_state_padded));
}

static void
gen5_emit_urb(struct sna *sna)
{
	int urb_vs_start, urb_vs_size;
	int urb_gs_start, urb_gs_size;
	int urb_clip_start, urb_clip_size;
	int urb_sf_start, urb_sf_size;
	int urb_cs_start, urb_cs_size;

	urb_vs_start = 0;
	urb_vs_size = URB_VS_ENTRIES * URB_VS_ENTRY_SIZE;
	urb_gs_start = urb_vs_start + urb_vs_size;
	urb_gs_size = URB_GS_ENTRIES * URB_GS_ENTRY_SIZE;
	urb_clip_start = urb_gs_start + urb_gs_size;
	urb_clip_size = URB_CLIP_ENTRIES * URB_CLIP_ENTRY_SIZE;
	urb_sf_start = urb_clip_start + urb_clip_size;
	urb_sf_size = URB_SF_ENTRIES * URB_SF_ENTRY_SIZE;
	urb_cs_start = urb_sf_start + urb_sf_size;
	urb_cs_size = URB_CS_ENTRIES * URB_CS_ENTRY_SIZE;

	OUT_BATCH(GEN5_URB_FENCE |
		  UF0_CS_REALLOC |
		  UF0_SF_REALLOC |
		  UF0_CLIP_REALLOC |
		  UF0_GS_REALLOC |
		  UF0_VS_REALLOC |
		  1);
	OUT_BATCH(((urb_clip_start + urb_clip_size) << UF1_CLIP_FENCE_SHIFT) |
		  ((urb_gs_start + urb_gs_size) << UF1_GS_FENCE_SHIFT) |
		  ((urb_vs_start + urb_vs_size) << UF1_VS_FENCE_SHIFT));
	OUT_BATCH(((urb_cs_start + urb_cs_size) << UF2_CS_FENCE_SHIFT) |
		  ((urb_sf_start + urb_sf_size) << UF2_SF_FENCE_SHIFT));

	/* Constant buffer state */
	OUT_BATCH(GEN5_CS_URB_STATE | 0);
	OUT_BATCH((URB_CS_ENTRY_SIZE - 1) << 4 | URB_CS_ENTRIES << 0);
}

static void
gen5_emit_state_base_address(struct sna *sna)
{
	assert(sna->render_state.gen5.general_bo->proxy == NULL);
	OUT_BATCH(GEN5_STATE_BASE_ADDRESS | 6);
	OUT_BATCH(kgem_add_reloc(&sna->kgem, /* general */
				 sna->kgem.nbatch,
				 sna->render_state.gen5.general_bo,
				 I915_GEM_DOMAIN_INSTRUCTION << 16,
				 BASE_ADDRESS_MODIFY));
	OUT_BATCH(kgem_add_reloc(&sna->kgem, /* surface */
				 sna->kgem.nbatch,
				 NULL,
				 I915_GEM_DOMAIN_INSTRUCTION << 16,
				 BASE_ADDRESS_MODIFY));
	OUT_BATCH(0); /* media */
	OUT_BATCH(kgem_add_reloc(&sna->kgem, /* instruction */
				 sna->kgem.nbatch,
				 sna->render_state.gen5.general_bo,
				 I915_GEM_DOMAIN_INSTRUCTION << 16,
				 BASE_ADDRESS_MODIFY));

	/* upper bounds, all disabled */
	OUT_BATCH(BASE_ADDRESS_MODIFY);
	OUT_BATCH(0);
	OUT_BATCH(BASE_ADDRESS_MODIFY);
}

static void
gen5_emit_invariant(struct sna *sna)
{
	/* Ironlake errata workaround: Before disabling the clipper,
	 * you have to MI_FLUSH to get the pipeline idle.
	 *
	 * However, the kernel flushes the pipeline between batches,
	 * so we should be safe....
	 *
	 * On the other hand, after using BLT we must use a non-pipelined
	 * operation...
	 */
	if (sna->kgem.nreloc)
		OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH);

	OUT_BATCH(GEN5_PIPELINE_SELECT | PIPELINE_SELECT_3D);

	gen5_emit_state_base_address(sna);

	sna->render_state.gen5.needs_invariant = false;
}

static void
gen5_get_batch(struct sna *sna, const struct sna_composite_op *op)
{
	kgem_set_mode(&sna->kgem, KGEM_RENDER, op->dst.bo);

	if (!kgem_check_batch_with_surfaces(&sna->kgem, 150, 4)) {
		DBG(("%s: flushing batch: %d < %d+%d\n",
		     __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
		     150, 4*8));
		kgem_submit(&sna->kgem);
		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
	}

	if (sna->render_state.gen5.needs_invariant)
		gen5_emit_invariant(sna);
}

static void
gen5_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
	assert(op->floats_per_rect == 3*op->floats_per_vertex);
	if (op->floats_per_vertex != sna->render_state.gen5.floats_per_vertex) {
		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
			gen4_vertex_finish(sna);

		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
		     sna->render_state.gen5.floats_per_vertex,
		     op->floats_per_vertex,
		     sna->render.vertex_index,
		     (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex));
		sna->render.vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
		sna->render.vertex_used = sna->render.vertex_index * op->floats_per_vertex;
		sna->render_state.gen5.floats_per_vertex = op->floats_per_vertex;
	}
}

static void
gen5_emit_binding_table(struct sna *sna, uint16_t offset)
{
	if (!DBG_NO_STATE_CACHE &&
	    sna->render_state.gen5.surface_table == offset)
		return;

	sna->render_state.gen5.surface_table = offset;

	/* Binding table pointers */
	OUT_BATCH(GEN5_3DSTATE_BINDING_TABLE_POINTERS | 4);
	OUT_BATCH(0);		/* vs */
	OUT_BATCH(0);		/* gs */
	OUT_BATCH(0);		/* clip */
	OUT_BATCH(0);		/* sf */
	/* Only the PS uses the binding table */
	OUT_BATCH(offset*4);
}

static bool
gen5_emit_pipelined_pointers(struct sna *sna,
			     const struct sna_composite_op *op,
			     int blend, int kernel)
{
	uint16_t sp, bp;
	uint32_t key;

	DBG(("%s: has_mask=%d, src=(%d, %d), mask=(%d, %d),kernel=%d, blend=%d, ca=%d, format=%x\n",
	     __FUNCTION__, op->u.gen5.ve_id & 2,
	     op->src.filter, op->src.repeat,
	     op->mask.filter, op->mask.repeat,
	     kernel, blend, op->has_component_alpha, (int)op->dst.format));

	sp = SAMPLER_OFFSET(op->src.filter, op->src.repeat,
			    op->mask.filter, op->mask.repeat,
			    kernel);
	bp = gen5_get_blend(blend, op->has_component_alpha, op->dst.format);

	key = sp | (uint32_t)bp << 16 | (op->mask.bo != NULL) << 31;
	DBG(("%s: sp=%d, bp=%d, key=%08x (current sp=%d, bp=%d, key=%08x)\n",
	     __FUNCTION__, sp, bp, key,
	     sna->render_state.gen5.last_pipelined_pointers & 0xffff,
	     (sna->render_state.gen5.last_pipelined_pointers >> 16) & 0x7fff,
	     sna->render_state.gen5.last_pipelined_pointers));
	if (key == sna->render_state.gen5.last_pipelined_pointers)
		return false;

	OUT_BATCH(GEN5_3DSTATE_PIPELINED_POINTERS | 5);
	OUT_BATCH(sna->render_state.gen5.vs);
	OUT_BATCH(GEN5_GS_DISABLE); /* passthrough */
	OUT_BATCH(GEN5_CLIP_DISABLE); /* passthrough */
	OUT_BATCH(sna->render_state.gen5.sf[op->mask.bo != NULL]);
	OUT_BATCH(sna->render_state.gen5.wm + sp);
	OUT_BATCH(sna->render_state.gen5.cc + bp);

	bp = (sna->render_state.gen5.last_pipelined_pointers & 0x7fff0000) != ((uint32_t)bp << 16);
	sna->render_state.gen5.last_pipelined_pointers = key;

	gen5_emit_urb(sna);

	return bp;
}

static bool
gen5_emit_drawing_rectangle(struct sna *sna, const struct sna_composite_op *op)
{
	uint32_t limit = (op->dst.height - 1) << 16 | (op->dst.width - 1);
	uint32_t offset = (uint16_t)op->dst.y << 16 | (uint16_t)op->dst.x;

	assert(!too_large(op->dst.x, op->dst.y));
	assert(!too_large(op->dst.width, op->dst.height));

	if (!DBG_NO_STATE_CACHE &&
	    sna->render_state.gen5.drawrect_limit == limit &&
	    sna->render_state.gen5.drawrect_offset == offset)
		return false;

	sna->render_state.gen5.drawrect_offset = offset;
	sna->render_state.gen5.drawrect_limit = limit;

	OUT_BATCH(GEN5_3DSTATE_DRAWING_RECTANGLE | (4 - 2));
	OUT_BATCH(0x00000000);
	OUT_BATCH(limit);
	OUT_BATCH(offset);
	return true;
}

static void
gen5_emit_vertex_elements(struct sna *sna,
			  const struct sna_composite_op *op)
{
	/*
	 * vertex data in vertex buffer
	 *    position: (x, y)
	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
	 *    texture coordinate 1 if (has_mask is true): same as above
	 */
	struct gen5_render_state *render = &sna->render_state.gen5;
	int id = op->u.gen5.ve_id;
	bool has_mask = id >> 2;
	uint32_t format, dw;

	if (!DBG_NO_STATE_CACHE && render->ve_id == id)
		return;

	DBG(("%s: changing %d -> %d\n", __FUNCTION__, render->ve_id, id));
	render->ve_id = id;

	/* The VUE layout
	 *    dword 0-3: pad (0.0, 0.0, 0.0, 0.0)
	 *    dword 4-7: position (x, y, 1.0, 1.0),
	 *    dword 8-11: texture coordinate 0 (u0, v0, w0, 1.0)
	 *    dword 12-15: texture coordinate 1 (u1, v1, w1, 1.0)
	 *
	 * dword 4-15 are fetched from vertex buffer
	 */
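	/* Added note (not in the original source): with a mask there are four
	 * vertex elements (pad, position, two texcoord sets), so the length
	 * field below is 2*4 + 1 - 2 = 7; without a mask it is 2*3 + 1 - 2 = 5.
	 */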
	OUT_BATCH(GEN5_3DSTATE_VERTEX_ELEMENTS |
		((2 * (has_mask ? 4 : 3)) + 1 - 2));

	OUT_BATCH((id << VE0_VERTEX_BUFFER_INDEX_SHIFT) | VE0_VALID |
		  (GEN5_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT) |
		  (0 << VE0_OFFSET_SHIFT));
	OUT_BATCH((VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT) |
		  (VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT) |
		  (VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT) |
		  (VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT));

	/* x,y */
	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT);
	OUT_BATCH(VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT |
		  VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);

	/* u0, v0, w0 */
	DBG(("%s: id=%d, first channel %d floats, offset=4b\n", __FUNCTION__,
	     id, id & 3));
	dw = VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT;
	switch (id & 3) {
	default:
		assert(0);
	case 0:
		format = GEN5_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
		break;
	case 1:
		format = GEN5_SURFACEFORMAT_R32_FLOAT << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
		break;
	case 2:
		format = GEN5_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
		break;
	case 3:
		format = GEN5_SURFACEFORMAT_R32G32B32_FLOAT << VE0_FORMAT_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
		dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT;
		break;
	}
	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
		  format | 4 << VE0_OFFSET_SHIFT);
	OUT_BATCH(dw);

	/* u1, v1, w1 */
	if (has_mask) {
		unsigned offset = 4 + ((id & 3) ?: 1) * sizeof(float);
		DBG(("%s: id=%x, second channel %d floats, offset=%db\n", __FUNCTION__,
		     id, id >> 2, offset));
		dw = VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT;
		switch (id >> 2) {
		case 1:
			format = GEN5_SURFACEFORMAT_R32_FLOAT << VE0_FORMAT_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
			dw |= VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
			dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
			break;
		default:
			assert(0);
		case 2:
			format = GEN5_SURFACEFORMAT_R32G32_FLOAT << VE0_FORMAT_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
			dw |= VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT;
			break;
		case 3:
			format = GEN5_SURFACEFORMAT_R32G32B32_FLOAT << VE0_FORMAT_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
			dw |= VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT;
			break;
		}
		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
			  format | offset << VE0_OFFSET_SHIFT);
		OUT_BATCH(dw);
	}
}

inline static void
gen5_emit_pipe_flush(struct sna *sna)
{
	OUT_BATCH(GEN5_PIPE_CONTROL | (4 - 2));
	OUT_BATCH(GEN5_PIPE_CONTROL_WC_FLUSH);
	OUT_BATCH(0);
	OUT_BATCH(0);
}

static void
gen5_emit_state(struct sna *sna,
		const struct sna_composite_op *op,
		uint16_t offset)
{
	bool flush = false;

	assert(op->dst.bo->exec);

	/* drawrect must be first for Ironlake BLT workaround */
	if (gen5_emit_drawing_rectangle(sna, op))
		offset &= ~1;
	gen5_emit_binding_table(sna, offset & ~1);
	if (gen5_emit_pipelined_pointers(sna, op, op->op, op->u.gen5.wm_kernel)){
		DBG(("%s: changed blend state, flush required? %d\n",
		     __FUNCTION__, (offset & 1) && op->op > PictOpSrc));
		flush = (offset & 1) && op->op > PictOpSrc;
	}
	gen5_emit_vertex_elements(sna, op);

	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
		DBG(("%s: flushing dirty (%d, %d)\n", __FUNCTION__,
		     kgem_bo_is_dirty(op->src.bo),
		     kgem_bo_is_dirty(op->mask.bo)));
		OUT_BATCH(MI_FLUSH);
		kgem_clear_dirty(&sna->kgem);
		kgem_bo_mark_dirty(op->dst.bo);
		flush = false;
	}
	if (flush) {
		DBG(("%s: forcing flush\n", __FUNCTION__));
		gen5_emit_pipe_flush(sna);
	}
}

static void gen5_bind_surfaces(struct sna *sna,
			       const struct sna_composite_op *op)
{
	bool dirty = kgem_bo_is_dirty(op->dst.bo);
	uint32_t *binding_table;
	uint16_t offset;

	gen5_get_batch(sna, op);

	binding_table = gen5_composite_get_binding_table(sna, &offset);

	binding_table[0] =
		gen5_bind_bo(sna,
			    op->dst.bo, op->dst.width, op->dst.height,
			    gen5_get_dest_format(op->dst.format),
			    true);
	binding_table[1] =
		gen5_bind_bo(sna,
			     op->src.bo, op->src.width, op->src.height,
			     op->src.card_format,
			     false);
	if (op->mask.bo) {
		assert(op->u.gen5.ve_id >> 2);
		binding_table[2] =
			gen5_bind_bo(sna,
				     op->mask.bo,
				     op->mask.width,
				     op->mask.height,
				     op->mask.card_format,
				     false);
	}

	if (sna->kgem.surface == offset &&
	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table &&
	    (op->mask.bo == NULL ||
	     sna->kgem.batch[sna->render_state.gen5.surface_table+2] == binding_table[2])) {
		sna->kgem.surface += sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
		offset = sna->render_state.gen5.surface_table;
	}

	gen5_emit_state(sna, op, offset | dirty);
}

fastcall static void
gen5_render_composite_blt(struct sna *sna,
			  const struct sna_composite_op *op,
			  const struct sna_composite_rectangles *r)
{
	DBG(("%s: src=(%d, %d)+(%d, %d), mask=(%d, %d)+(%d, %d), dst=(%d, %d)+(%d, %d), size=(%d, %d)\n",
	     __FUNCTION__,
	     r->src.x, r->src.y, op->src.offset[0], op->src.offset[1],
	     r->mask.x, r->mask.y, op->mask.offset[0], op->mask.offset[1],
	     r->dst.x, r->dst.y, op->dst.x, op->dst.y,
	     r->width, r->height));

	gen5_get_rectangles(sna, op, 1, gen5_bind_surfaces);
	op->prim_emit(sna, op, r);
}
#if 0
fastcall static void
gen5_render_composite_box(struct sna *sna,
			  const struct sna_composite_op *op,
			  const BoxRec *box)
{
	struct sna_composite_rectangles r;

	DBG(("  %s: (%d, %d), (%d, %d)\n",
	     __FUNCTION__,
	     box->x1, box->y1, box->x2, box->y2));

	gen5_get_rectangles(sna, op, 1, gen5_bind_surfaces);

	r.dst.x = box->x1;
	r.dst.y = box->y1;
	r.width  = box->x2 - box->x1;
	r.height = box->y2 - box->y1;
	r.mask = r.src = r.dst;

	op->prim_emit(sna, op, &r);
}

static void
gen5_render_composite_boxes__blt(struct sna *sna,
				 const struct sna_composite_op *op,
				 const BoxRec *box, int nbox)
{
	DBG(("%s(%d) delta=(%d, %d), src=(%d, %d)/(%d, %d), mask=(%d, %d)/(%d, %d)\n",
	     __FUNCTION__, nbox, op->dst.x, op->dst.y,
	     op->src.offset[0], op->src.offset[1],
	     op->src.width, op->src.height,
	     op->mask.offset[0], op->mask.offset[1],
	     op->mask.width, op->mask.height));

	do {
		int nbox_this_time;

		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
						     gen5_bind_surfaces);
		nbox -= nbox_this_time;

		do {
			struct sna_composite_rectangles r;

			DBG(("  %s: (%d, %d), (%d, %d)\n",
			     __FUNCTION__,
			     box->x1, box->y1, box->x2, box->y2));

			r.dst.x = box->x1;
			r.dst.y = box->y1;
			r.width  = box->x2 - box->x1;
			r.height = box->y2 - box->y1;
			r.mask = r.src = r.dst;
			op->prim_emit(sna, op, &r);
			box++;
		} while (--nbox_this_time);
	} while (nbox);
}

static void
gen5_render_composite_boxes(struct sna *sna,
			    const struct sna_composite_op *op,
			    const BoxRec *box, int nbox)
{
	DBG(("%s: nbox=%d\n", __FUNCTION__, nbox));

	do {
		int nbox_this_time;
		float *v;

		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
						     gen5_bind_surfaces);
		assert(nbox_this_time);
		nbox -= nbox_this_time;

		v = sna->render.vertices + sna->render.vertex_used;
		sna->render.vertex_used += nbox_this_time * op->floats_per_rect;

		op->emit_boxes(op, box, nbox_this_time, v);
		box += nbox_this_time;
	} while (nbox);
}

static void
gen5_render_composite_boxes__thread(struct sna *sna,
				    const struct sna_composite_op *op,
				    const BoxRec *box, int nbox)
{
	DBG(("%s: nbox=%d\n", __FUNCTION__, nbox));

	sna_vertex_lock(&sna->render);
	do {
		int nbox_this_time;
		float *v;

		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
						     gen5_bind_surfaces);
		assert(nbox_this_time);
		nbox -= nbox_this_time;

		v = sna->render.vertices + sna->render.vertex_used;
		sna->render.vertex_used += nbox_this_time * op->floats_per_rect;

		sna_vertex_acquire__locked(&sna->render);
		sna_vertex_unlock(&sna->render);

		op->emit_boxes(op, box, nbox_this_time, v);
		box += nbox_this_time;

		sna_vertex_lock(&sna->render);
		sna_vertex_release__locked(&sna->render);
	} while (nbox);
	sna_vertex_unlock(&sna->render);
}

#ifndef MAX
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif

static uint32_t gen5_bind_video_source(struct sna *sna,
				       struct kgem_bo *src_bo,
				       uint32_t src_offset,
				       int src_width,
				       int src_height,
				       int src_pitch,
				       uint32_t src_surf_format)
{
	struct gen5_surface_state *ss;

	sna->kgem.surface -= sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);

	ss = memset(sna->kgem.batch + sna->kgem.surface, 0, sizeof(*ss));
	ss->ss0.surface_type = GEN5_SURFACE_2D;
	ss->ss0.surface_format = src_surf_format;
	ss->ss0.color_blend = 1;

	ss->ss1.base_addr =
		kgem_add_reloc(&sna->kgem,
			       sna->kgem.surface + 1,
			       src_bo,
			       I915_GEM_DOMAIN_SAMPLER << 16,
			       src_offset);

	ss->ss2.width  = src_width - 1;
	ss->ss2.height = src_height - 1;
	ss->ss3.pitch  = src_pitch - 1;

	return sna->kgem.surface * sizeof(uint32_t);
}

static void gen5_video_bind_surfaces(struct sna *sna,
				     const struct sna_composite_op *op)
{
	bool dirty = kgem_bo_is_dirty(op->dst.bo);
	struct sna_video_frame *frame = op->priv;
	uint32_t src_surf_format;
	uint32_t src_surf_base[6];
	int src_width[6];
	int src_height[6];
	int src_pitch[6];
	uint32_t *binding_table;
	uint16_t offset;
	int n_src, n;

	src_surf_base[0] = 0;
	src_surf_base[1] = 0;
	src_surf_base[2] = frame->VBufOffset;
	src_surf_base[3] = frame->VBufOffset;
	src_surf_base[4] = frame->UBufOffset;
	src_surf_base[5] = frame->UBufOffset;

	if (is_planar_fourcc(frame->id)) {
		src_surf_format = GEN5_SURFACEFORMAT_R8_UNORM;
		src_width[1]  = src_width[0]  = frame->width;
		src_height[1] = src_height[0] = frame->height;
		src_pitch[1]  = src_pitch[0]  = frame->pitch[1];
		src_width[4]  = src_width[5]  = src_width[2]  = src_width[3] =
			frame->width / 2;
		src_height[4] = src_height[5] = src_height[2] = src_height[3] =
			frame->height / 2;
		src_pitch[4]  = src_pitch[5]  = src_pitch[2]  = src_pitch[3] =
			frame->pitch[0];
		n_src = 6;
	} else {
		if (frame->id == FOURCC_UYVY)
			src_surf_format = GEN5_SURFACEFORMAT_YCRCB_SWAPY;
		else
			src_surf_format = GEN5_SURFACEFORMAT_YCRCB_NORMAL;

		src_width[0]  = frame->width;
		src_height[0] = frame->height;
		src_pitch[0]  = frame->pitch[0];
		n_src = 1;
	}

	gen5_get_batch(sna, op);

	binding_table = gen5_composite_get_binding_table(sna, &offset);
	binding_table[0] =
		gen5_bind_bo(sna,
			     op->dst.bo, op->dst.width, op->dst.height,
			     gen5_get_dest_format(op->dst.format),
			     true);
	for (n = 0; n < n_src; n++) {
		binding_table[1+n] =
			gen5_bind_video_source(sna,
					       frame->bo,
					       src_surf_base[n],
					       src_width[n],
					       src_height[n],
					       src_pitch[n],
					       src_surf_format);
	}

	gen5_emit_state(sna, op, offset | dirty);
}

static bool
gen5_render_video(struct sna *sna,
		  struct sna_video *video,
		  struct sna_video_frame *frame,
		  RegionPtr dstRegion,
		  PixmapPtr pixmap)
{
	struct sna_composite_op tmp;
	int dst_width = dstRegion->extents.x2 - dstRegion->extents.x1;
	int dst_height = dstRegion->extents.y2 - dstRegion->extents.y1;
	int src_width = frame->src.x2 - frame->src.x1;
	int src_height = frame->src.y2 - frame->src.y1;
	float src_offset_x, src_offset_y;
	float src_scale_x, src_scale_y;
	int nbox, pix_xoff, pix_yoff;
	struct sna_pixmap *priv;
	BoxPtr box;

	DBG(("%s: %dx%d -> %dx%d\n", __FUNCTION__,
	     src_width, src_height, dst_width, dst_height));

	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
	if (priv == NULL)
		return false;

	memset(&tmp, 0, sizeof(tmp));

	tmp.op = PictOpSrc;
	tmp.dst.pixmap = pixmap;
	tmp.dst.width  = pixmap->drawable.width;
	tmp.dst.height = pixmap->drawable.height;
	tmp.dst.format = sna_format_for_depth(pixmap->drawable.depth);
	tmp.dst.bo = priv->gpu_bo;

	if (src_width == dst_width && src_height == dst_height)
		tmp.src.filter = SAMPLER_FILTER_NEAREST;
	else
		tmp.src.filter = SAMPLER_FILTER_BILINEAR;
	tmp.src.repeat = SAMPLER_EXTEND_PAD;
	tmp.src.bo = frame->bo;
	tmp.mask.bo = NULL;
	tmp.u.gen5.wm_kernel =
		is_planar_fourcc(frame->id) ? WM_KERNEL_VIDEO_PLANAR : WM_KERNEL_VIDEO_PACKED;
	tmp.u.gen5.ve_id = 2;
	tmp.is_affine = true;
	tmp.floats_per_vertex = 3;
	tmp.floats_per_rect = 9;
	tmp.priv = frame;

	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
		kgem_submit(&sna->kgem);
		assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
	}

	gen5_video_bind_surfaces(sna, &tmp);
	gen5_align_vertex(sna, &tmp);

	/* Set up the offset for translating from the given region (in screen
	 * coordinates) to the backing pixmap.
	 */
#ifdef COMPOSITE
	pix_xoff = -pixmap->screen_x + pixmap->drawable.x;
	pix_yoff = -pixmap->screen_y + pixmap->drawable.y;
#else
	pix_xoff = 0;
	pix_yoff = 0;
#endif

	src_scale_x = (float)src_width / dst_width / frame->width;
	src_offset_x = (float)frame->src.x1 / frame->width - dstRegion->extents.x1 * src_scale_x;

	src_scale_y = (float)src_height / dst_height / frame->height;
	src_offset_y = (float)frame->src.y1 / frame->height - dstRegion->extents.y1 * src_scale_y;

	box = REGION_RECTS(dstRegion);
	nbox = REGION_NUM_RECTS(dstRegion);
	while (nbox--) {
		BoxRec r;

		r.x1 = box->x1 + pix_xoff;
		r.x2 = box->x2 + pix_xoff;
		r.y1 = box->y1 + pix_yoff;
		r.y2 = box->y2 + pix_yoff;

		gen5_get_rectangles(sna, &tmp, 1, gen5_video_bind_surfaces);

		OUT_VERTEX(r.x2, r.y2);
		OUT_VERTEX_F(box->x2 * src_scale_x + src_offset_x);
		OUT_VERTEX_F(box->y2 * src_scale_y + src_offset_y);

		OUT_VERTEX(r.x1, r.y2);
		OUT_VERTEX_F(box->x1 * src_scale_x + src_offset_x);
		OUT_VERTEX_F(box->y2 * src_scale_y + src_offset_y);

		OUT_VERTEX(r.x1, r.y1);
		OUT_VERTEX_F(box->x1 * src_scale_x + src_offset_x);
		OUT_VERTEX_F(box->y1 * src_scale_y + src_offset_y);

		if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
			sna_damage_add_box(&priv->gpu_damage, &r);
			sna_damage_subtract_box(&priv->cpu_damage, &r);
		}
		box++;
	}

	gen4_vertex_flush(sna);
	return true;
}
#endif

static void
gen5_render_composite_done(struct sna *sna,
			   const struct sna_composite_op *op)
{
	if (sna->render.vertex_offset) {
		gen4_vertex_flush(sna);
		gen5_magic_ca_pass(sna,op);
	}

	DBG(("%s()\n", __FUNCTION__));

}

4251 Serge 1383
#if 0
1384
static bool
1385
gen5_composite_set_target(struct sna *sna,
1386
			  struct sna_composite_op *op,
1387
			  PicturePtr dst,
1388
			  int x, int y, int w, int h,
1389
			  bool partial)
1390
{
1391
	BoxRec box;
3280 Serge 1392
 
4251 Serge 1393
	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
1394
	op->dst.width  = op->dst.pixmap->drawable.width;
1395
	op->dst.height = op->dst.pixmap->drawable.height;
1396
	op->dst.format = dst->format;
1397
	if (w && h) {
1398
		box.x1 = x;
1399
		box.y1 = y;
1400
		box.x2 = x + w;
1401
		box.y2 = y + h;
1402
	} else
1403
		sna_render_picture_extents(dst, &box);
1404
 
1405
	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
1406
					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
1407
					  &box, &op->damage);
1408
	if (op->dst.bo == NULL)
1409
		return false;
1410
 
1411
	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
1412
			    &op->dst.x, &op->dst.y);
1413
 
1414
	DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d),damage=%p\n",
1415
	     __FUNCTION__,
1416
	     op->dst.pixmap, (int)op->dst.format,
1417
	     op->dst.width, op->dst.height,
1418
	     op->dst.bo->pitch,
1419
	     op->dst.x, op->dst.y,
1420
	     op->damage ? *op->damage : (void *)-1));
1421
 
1422
	assert(op->dst.bo->proxy == NULL);
1423
 
1424
	if (too_large(op->dst.width, op->dst.height) &&
1425
	    !sna_render_composite_redirect(sna, op, x, y, w, h, partial))
1426
		return false;
1427
 
1428
	return true;
1429
}
1430
 
3280 Serge 1431
static bool
4251 Serge 1432
gen5_render_composite(struct sna *sna,
1433
		      uint8_t op,
1434
		      PicturePtr src,
1435
		      PicturePtr mask,
1436
		      PicturePtr dst,
1437
		      int16_t src_x, int16_t src_y,
1438
		      int16_t msk_x, int16_t msk_y,
1439
		      int16_t dst_x, int16_t dst_y,
1440
		      int16_t width, int16_t height,
1441
		      struct sna_composite_op *tmp)
3280 Serge 1442
{
1443
	DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
1444
	     width, height, sna->kgem.mode));
1445
 
4251 Serge 1446
	if (op >= ARRAY_SIZE(gen5_blend_op)) {
1447
		DBG(("%s: unhandled blend op %d\n", __FUNCTION__, op));
1448
		return false;
1449
	}
3280 Serge 1450
 
4251 Serge 1451
	if (mask == NULL &&
1452
	    try_blt(sna, dst, src, width, height) &&
1453
	    sna_blt_composite(sna, op,
1454
			      src, dst,
1455
			      src_x, src_y,
1456
			      dst_x, dst_y,
1457
			      width, height,
1458
			      tmp, false))
1459
		return true;
3280 Serge 1460
 
4251 Serge 1461
	if (gen5_composite_fallback(sna, src, mask, dst))
1462
		return false;
3280 Serge 1463
 
4251 Serge 1464
	if (need_tiling(sna, width, height))
1465
		return sna_tiling_composite(op, src, mask, dst,
1466
					    src_x, src_y,
1467
					    msk_x, msk_y,
1468
					    dst_x, dst_y,
1469
					    width, height,
1470
					    tmp);
3280 Serge 1471
 
4251 Serge 1472
	if (!gen5_composite_set_target(sna, tmp, dst,
1473
				       dst_x, dst_y, width, height,
1474
				       op > PictOpSrc || dst->pCompositeClip->data)) {
1475
		DBG(("%s: failed to set composite target\n", __FUNCTION__));
1476
		return false;
1477
	}
3280 Serge 1478
 
4251 Serge 1479
	DBG(("%s: preparing source\n", __FUNCTION__));
1480
	tmp->op = op;
1481
	switch (gen5_composite_picture(sna, src, &tmp->src,
1482
				       src_x, src_y,
1483
				       width, height,
1484
				       dst_x, dst_y,
1485
				       dst->polyMode == PolyModePrecise)) {
1486
	case -1:
1487
		DBG(("%s: failed to prepare source picture\n", __FUNCTION__));
1488
		goto cleanup_dst;
1489
	case 0:
1490
		if (!gen4_channel_init_solid(sna, &tmp->src, 0))
1491
			goto cleanup_dst;
1492
		/* fall through to fixup */
1493
	case 1:
1494
		if (mask == NULL &&
1495
		    sna_blt_composite__convert(sna,
1496
					       dst_x, dst_y, width, height,
1497
					       tmp))
1498
			return true;
3280 Serge 1499
 
4251 Serge 1500
		gen5_composite_channel_convert(&tmp->src);
1501
		break;
1502
	}
1503
 
1504
	tmp->is_affine = tmp->src.is_affine;
3280 Serge 1505
	tmp->has_component_alpha = false;
1506
	tmp->need_magic_ca_pass = false;
1507
 
4251 Serge 1508
	if (mask) {
1509
		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
1510
			tmp->has_component_alpha = true;
3280 Serge 1511
 
4251 Serge 1512
			/* Check if it's component alpha that relies on a source alpha and on
1513
			 * the source value.  We can only get one of those into the single
1514
			 * source value that we get to blend with.
1515
			 */
1516
			if (gen5_blend_op[op].src_alpha &&
1517
			    (gen5_blend_op[op].src_blend != GEN5_BLENDFACTOR_ZERO)) {
1518
				if (op != PictOpOver) {
1519
					DBG(("%s: unhandled CA blend op %d\n", __FUNCTION__, op));
1520
					goto cleanup_src;
1521
				}
1522
 
1523
				tmp->need_magic_ca_pass = true;
1524
				tmp->op = PictOpOutReverse;
1525
			}
1526
		}
1527
 
1528
		if (!reuse_source(sna,
1529
				  src, &tmp->src, src_x, src_y,
1530
				  mask, &tmp->mask, msk_x, msk_y)) {
1531
			DBG(("%s: preparing mask\n", __FUNCTION__));
1532
			switch (gen5_composite_picture(sna, mask, &tmp->mask,
1533
						       msk_x, msk_y,
1534
						       width, height,
1535
						       dst_x, dst_y,
1536
						       dst->polyMode == PolyModePrecise)) {
1537
			case -1:
1538
				DBG(("%s: failed to prepare mask picture\n", __FUNCTION__));
1539
				goto cleanup_src;
1540
			case 0:
1541
				if (!gen4_channel_init_solid(sna, &tmp->mask, 0))
1542
					goto cleanup_src;
1543
				/* fall through to fixup */
1544
			case 1:
1545
				gen5_composite_channel_convert(&tmp->mask);
1546
				break;
1547
			}
1548
		}
1549
 
1550
		tmp->is_affine &= tmp->mask.is_affine;
1551
	}
1552
 
3280 Serge 1553
	tmp->u.gen5.wm_kernel =
1554
		gen5_choose_composite_kernel(tmp->op,
1555
					     tmp->mask.bo != NULL,
1556
					     tmp->has_component_alpha,
1557
					     tmp->is_affine);
4251 Serge 1558
	tmp->u.gen5.ve_id = gen4_choose_composite_emitter(sna, tmp);
3280 Serge 1559
 
1560
	tmp->blt   = gen5_render_composite_blt;
4251 Serge 1561
	tmp->box   = gen5_render_composite_box;
1562
	tmp->boxes = gen5_render_composite_boxes__blt;
1563
	if (tmp->emit_boxes) {
1564
		tmp->boxes = gen5_render_composite_boxes;
1565
		tmp->thread_boxes = gen5_render_composite_boxes__thread;
1566
	}
3280 Serge 1567
	tmp->done  = gen5_render_composite_done;
1568
 
1569
	if (!kgem_check_bo(&sna->kgem,
1570
			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) {
1571
		kgem_submit(&sna->kgem);
4251 Serge 1572
		if (!kgem_check_bo(&sna->kgem,
1573
				   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL))
1574
			goto cleanup_mask;
3280 Serge 1575
	}
1576
 
1577
	gen5_bind_surfaces(sna, tmp);
1578
	gen5_align_vertex(sna, tmp);
1579
	return true;
1580
 
4251 Serge 1581
cleanup_mask:
1582
	if (tmp->mask.bo)
1583
		kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
1584
cleanup_src:
1585
	if (tmp->src.bo)
1586
		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
1587
cleanup_dst:
1588
	if (tmp->redirect.real_bo)
1589
		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
1590
	return false;
3280 Serge 1591
}
1592
 
4251 Serge 1593
#if !NO_COMPOSITE_SPANS
1594
fastcall static void
1595
gen5_render_composite_spans_box(struct sna *sna,
1596
				const struct sna_composite_spans_op *op,
1597
				const BoxRec *box, float opacity)
1598
{
1599
	DBG(("%s: src=+(%d, %d), opacity=%f, dst=+(%d, %d), box=(%d, %d) x (%d, %d)\n",
1600
	     __FUNCTION__,
1601
	     op->base.src.offset[0], op->base.src.offset[1],
1602
	     opacity,
1603
	     op->base.dst.x, op->base.dst.y,
1604
	     box->x1, box->y1,
1605
	     box->x2 - box->x1,
1606
	     box->y2 - box->y1));
3280 Serge 1607
 
4251 Serge 1608
	gen5_get_rectangles(sna, &op->base, 1, gen5_bind_surfaces);
1609
	op->prim_emit(sna, op, box, opacity);
1610
}
3280 Serge 1611
 
1612
static void
4251 Serge 1613
gen5_render_composite_spans_boxes(struct sna *sna,
1614
				  const struct sna_composite_spans_op *op,
1615
				  const BoxRec *box, int nbox,
1616
				  float opacity)
1617
{
1618
	DBG(("%s: nbox=%d, src=+(%d, %d), opacity=%f, dst=+(%d, %d)\n",
1619
	     __FUNCTION__, nbox,
1620
	     op->base.src.offset[0], op->base.src.offset[1],
1621
	     opacity,
1622
	     op->base.dst.x, op->base.dst.y));
1623
 
1624
	do {
1625
		int nbox_this_time;
1626
 
1627
		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox,
1628
						     gen5_bind_surfaces);
1629
		nbox -= nbox_this_time;
1630
 
1631
		do {
1632
			DBG(("  %s: (%d, %d) x (%d, %d)\n", __FUNCTION__,
1633
			     box->x1, box->y1,
1634
			     box->x2 - box->x1,
1635
			     box->y2 - box->y1));
1636
 
1637
			op->prim_emit(sna, op, box++, opacity);
1638
		} while (--nbox_this_time);
1639
	} while (nbox);
1640
}

fastcall static void
gen5_render_composite_spans_boxes__thread(struct sna *sna,
					  const struct sna_composite_spans_op *op,
					  const struct sna_opacity_box *box,
					  int nbox)
{
	DBG(("%s: nbox=%d, src=+(%d, %d), dst=+(%d, %d)\n",
	     __FUNCTION__, nbox,
	     op->base.src.offset[0], op->base.src.offset[1],
	     op->base.dst.x, op->base.dst.y));

	sna_vertex_lock(&sna->render);
	do {
		int nbox_this_time;
		float *v;

		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox,
						     gen5_bind_surfaces);
		assert(nbox_this_time);
		nbox -= nbox_this_time;

		v = sna->render.vertices + sna->render.vertex_used;
		sna->render.vertex_used += nbox_this_time * op->base.floats_per_rect;

		sna_vertex_acquire__locked(&sna->render);
		sna_vertex_unlock(&sna->render);

		op->emit_boxes(op, box, nbox_this_time, v);
		box += nbox_this_time;

		sna_vertex_lock(&sna->render);
		sna_vertex_release__locked(&sna->render);
	} while (nbox);
	sna_vertex_unlock(&sna->render);
}

fastcall static void
gen5_render_composite_spans_done(struct sna *sna,
				 const struct sna_composite_spans_op *op)
{
	if (sna->render.vertex_offset)
		gen4_vertex_flush(sna);

	DBG(("%s()\n", __FUNCTION__));

	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
	sna_render_composite_redirect_done(sna, &op->base);
}

static bool
gen5_check_composite_spans(struct sna *sna,
			   uint8_t op, PicturePtr src, PicturePtr dst,
			   int16_t width, int16_t height,
			   unsigned flags)
{
	DBG(("%s: op=%d, width=%d, height=%d, flags=%x\n",
	     __FUNCTION__, op, width, height, flags));

	if (op >= ARRAY_SIZE(gen5_blend_op))
		return false;

	if (gen5_composite_fallback(sna, src, NULL, dst)) {
		DBG(("%s: operation would fallback\n", __FUNCTION__));
		return false;
	}

	if (need_tiling(sna, width, height) &&
	    !is_gpu(sna, dst->pDrawable, PREFER_GPU_SPANS)) {
		DBG(("%s: fallback, tiled operation not on GPU\n",
		     __FUNCTION__));
		return false;
	}

	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0) {
		struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
		assert(priv);

		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
			return true;

		if (flags & COMPOSITE_SPANS_INPLACE_HINT)
			return false;

		if ((sna->render.prefer_gpu & PREFER_GPU_SPANS) == 0 &&
		    dst->format == PICT_a8)
			return false;

		return priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo);
	}

	return true;
}
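
/* Build a spans operation: validate the target, resolve the source picture
 * (falling back to a solid channel if it degenerates), then pick the opacity
 * emitter and WM kernel before binding surfaces and aligning vertices.
 */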

static bool
gen5_render_composite_spans(struct sna *sna,
			    uint8_t op,
			    PicturePtr src,
			    PicturePtr dst,
			    int16_t src_x,  int16_t src_y,
			    int16_t dst_x,  int16_t dst_y,
			    int16_t width,  int16_t height,
			    unsigned flags,
			    struct sna_composite_spans_op *tmp)
{
	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
	     width, height, flags, sna->kgem.ring));

	assert(gen5_check_composite_spans(sna, op, src, dst, width, height, flags));

	if (need_tiling(sna, width, height)) {
		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
		     __FUNCTION__, width, height));
		return sna_tiling_composite_spans(op, src, dst,
						  src_x, src_y, dst_x, dst_y,
						  width, height, flags, tmp);
	}

	tmp->base.op = op;
	if (!gen5_composite_set_target(sna, &tmp->base, dst,
				       dst_x, dst_y, width, height,
				       true))
		return false;

	switch (gen5_composite_picture(sna, src, &tmp->base.src,
				       src_x, src_y,
				       width, height,
				       dst_x, dst_y,
				       dst->polyMode == PolyModePrecise)) {
	case -1:
		goto cleanup_dst;
	case 0:
		if (!gen4_channel_init_solid(sna, &tmp->base.src, 0))
			goto cleanup_dst;
		/* fall through to fixup */
	case 1:
		gen5_composite_channel_convert(&tmp->base.src);
		break;
	}

	tmp->base.mask.bo = NULL;

	tmp->base.is_affine = tmp->base.src.is_affine;
	tmp->base.has_component_alpha = false;
	tmp->base.need_magic_ca_pass = false;

	tmp->base.u.gen5.ve_id = gen4_choose_spans_emitter(sna, tmp);
	tmp->base.u.gen5.wm_kernel = WM_KERNEL_OPACITY | !tmp->base.is_affine;

	tmp->box   = gen5_render_composite_spans_box;
	tmp->boxes = gen5_render_composite_spans_boxes;
	if (tmp->emit_boxes)
		tmp->thread_boxes = gen5_render_composite_spans_boxes__thread;
	tmp->done  = gen5_render_composite_spans_done;

	if (!kgem_check_bo(&sna->kgem,
			   tmp->base.dst.bo, tmp->base.src.bo,
			   NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem,
				   tmp->base.dst.bo, tmp->base.src.bo,
				   NULL))
			goto cleanup_src;
	}

	gen5_bind_surfaces(sna, &tmp->base);
	gen5_align_vertex(sna, &tmp->base);
	return true;

cleanup_src:
	if (tmp->base.src.bo)
		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
cleanup_dst:
	if (tmp->base.redirect.real_bo)
		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
	return false;
}
#endif
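
/* Copy a list of boxes on the GPU.  Try a straight BLT first when the depths
 * match; otherwise draw each box as a textured rectangle on the 3D pipeline,
 * redirecting the destination or partially mapping the source when a surface
 * is too large, and fall back to BLT/tiled copies if the render path cannot
 * be used.
 */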



static bool
gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
		       const BoxRec *box, int n, unsigned flags)
{
	struct sna_composite_op tmp;

	DBG(("%s alu=%d, src=%ld:handle=%d, dst=%ld:handle=%d boxes=%d x [((%d, %d), (%d, %d))...], flags=%x\n",
	     __FUNCTION__, alu,
	     src->drawable.serialNumber, src_bo->handle,
	     dst->drawable.serialNumber, dst_bo->handle,
	     n, box->x1, box->y1, box->x2, box->y2,
	     flags));

	if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
	    sna_blt_copy_boxes(sna, alu,
			       src_bo, src_dx, src_dy,
			       dst_bo, dst_dx, dst_dy,
			       dst->drawable.bitsPerPixel,
			       box, n))
		return true;

	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo) {
fallback_blt:
		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
			return false;

		return sna_blt_copy_boxes_fallback(sna, alu,
						   src, src_bo, src_dx, src_dy,
						   dst, dst_bo, dst_dx, dst_dy,
						   box, n);
	}

	memset(&tmp, 0, sizeof(tmp));

	if (dst->drawable.depth == src->drawable.depth) {
		tmp.dst.format = sna_render_format_for_depth(dst->drawable.depth);
		tmp.src.pict_format = tmp.dst.format;
	} else {
		tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
		tmp.src.pict_format = sna_format_for_depth(src->drawable.depth);
	}
	if (!gen5_check_format(tmp.src.pict_format)) {
		DBG(("%s: unsupported source format, %x, use BLT\n",
		     __FUNCTION__, tmp.src.pict_format));
		goto fallback_blt;
	}

	DBG(("%s (%d, %d)->(%d, %d) x %d\n",
	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n));

	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;

	tmp.dst.pixmap = dst;
	tmp.dst.width  = dst->drawable.width;
	tmp.dst.height = dst->drawable.height;
	tmp.dst.x = tmp.dst.y = 0;
	tmp.dst.bo = dst_bo;
	tmp.damage = NULL;

	sna_render_composite_redirect_init(&tmp);
	if (too_large(tmp.dst.width, tmp.dst.height)) {
		BoxRec extents = box[0];
		int i;

		for (i = 1; i < n; i++) {
			if (box[i].x1 < extents.x1)
				extents.x1 = box[i].x1;
			if (box[i].y1 < extents.y1)
				extents.y1 = box[i].y1;

			if (box[i].x2 > extents.x2)
				extents.x2 = box[i].x2;
			if (box[i].y2 > extents.y2)
				extents.y2 = box[i].y2;
		}
		if (!sna_render_composite_redirect(sna, &tmp,
						   extents.x1 + dst_dx,
						   extents.y1 + dst_dy,
						   extents.x2 - extents.x1,
						   extents.y2 - extents.y1,
						   n > 1))
			goto fallback_tiled;
	}

	tmp.src.filter = SAMPLER_FILTER_NEAREST;
	tmp.src.repeat = SAMPLER_EXTEND_NONE;
	tmp.src.card_format = gen5_get_card_format(tmp.src.pict_format);
	if (too_large(src->drawable.width, src->drawable.height)) {
		BoxRec extents = box[0];
		int i;

		for (i = 1; i < n; i++) {
			if (box[i].x1 < extents.x1)
				extents.x1 = box[i].x1;
			if (box[i].y1 < extents.y1)
				extents.y1 = box[i].y1;

			if (box[i].x2 > extents.x2)
				extents.x2 = box[i].x2;
			if (box[i].y2 > extents.y2)
				extents.y2 = box[i].y2;
		}

		if (!sna_render_pixmap_partial(sna, src, src_bo, &tmp.src,
					       extents.x1 + src_dx,
					       extents.y1 + src_dy,
					       extents.x2 - extents.x1,
					       extents.y2 - extents.y1))
			goto fallback_tiled_dst;
	} else {
		tmp.src.bo = kgem_bo_reference(src_bo);
		tmp.src.width  = src->drawable.width;
		tmp.src.height = src->drawable.height;
		tmp.src.offset[0] = tmp.src.offset[1] = 0;
		tmp.src.scale[0] = 1.f/src->drawable.width;
		tmp.src.scale[1] = 1.f/src->drawable.height;
	}

	tmp.is_affine = true;
	tmp.floats_per_vertex = 3;
	tmp.floats_per_rect = 9;
	tmp.u.gen5.wm_kernel = WM_KERNEL;
	tmp.u.gen5.ve_id = 2;

	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
		kgem_submit(&sna->kgem);
		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
			DBG(("%s: aperture check failed\n", __FUNCTION__));
			goto fallback_tiled_src;
		}
	}

	dst_dx += tmp.dst.x;
	dst_dy += tmp.dst.y;
	tmp.dst.x = tmp.dst.y = 0;

	src_dx += tmp.src.offset[0];
	src_dy += tmp.src.offset[1];

	gen5_copy_bind_surfaces(sna, &tmp);
	gen5_align_vertex(sna, &tmp);
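
	/* Emit each box as a rectangle of three vertices (bottom-right,
	 * bottom-left, top-left), each carrying the destination position and
	 * the source coordinate normalised by the 1/width, 1/height scales.
	 */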

	do {
		int n_this_time;

		n_this_time = gen5_get_rectangles(sna, &tmp, n,
						  gen5_copy_bind_surfaces);
		n -= n_this_time;

		do {
			DBG(("	(%d, %d) -> (%d, %d) + (%d, %d)\n",
			     box->x1 + src_dx, box->y1 + src_dy,
			     box->x1 + dst_dx, box->y1 + dst_dy,
			     box->x2 - box->x1, box->y2 - box->y1));
			OUT_VERTEX(box->x2 + dst_dx, box->y2 + dst_dy);
			OUT_VERTEX_F((box->x2 + src_dx) * tmp.src.scale[0]);
			OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]);

			OUT_VERTEX(box->x1 + dst_dx, box->y2 + dst_dy);
			OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]);
			OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]);

			OUT_VERTEX(box->x1 + dst_dx, box->y1 + dst_dy);
			OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]);
			OUT_VERTEX_F((box->y1 + src_dy) * tmp.src.scale[1]);

			box++;
		} while (--n_this_time);
	} while (n);

	gen4_vertex_flush(sna);
	sna_render_composite_redirect_done(sna, &tmp);
	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
	return true;

fallback_tiled_src:
	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
fallback_tiled_dst:
	if (tmp.redirect.real_bo)
		kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
fallback_tiled:
	if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
	    sna_blt_copy_boxes(sna, alu,
			       src_bo, src_dx, src_dy,
			       dst_bo, dst_dx, dst_dy,
			       dst->drawable.bitsPerPixel,
			       box, n))
		return true;

	DBG(("%s: tiled fallback\n", __FUNCTION__));
	return sna_tiling_copy_boxes(sna, alu,
				     src, src_bo, src_dx, src_dy,
				     dst, dst_bo, dst_dx, dst_dy,
				     box, n);
}

#endif
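
/* render.flush hook: close the vertex buffer so the batch can be submitted;
 * the asserts confirm that no primitive is still being emitted.
 */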

static void
gen5_render_flush(struct sna *sna)
{
	gen4_vertex_close(sna);

	assert(sna->render.vb_id == 0);
	assert(sna->render.vertex_offset == 0);
}

static void
gen5_render_context_switch(struct kgem *kgem,
			   int new_mode)
{
	if (!kgem->nbatch)
		return;

	/* WaNonPipelinedStateCommandFlush
	 *
	 * Ironlake has a limitation that a 3D or Media command can't
	 * be the first command after a BLT, unless it's
	 * non-pipelined.
	 *
	 * We do this by ensuring that the non-pipelined drawrect
	 * is always emitted first following a switch from BLT.
	 */
	if (kgem->mode == KGEM_BLT) {
		struct sna *sna = to_sna_from_kgem(kgem);
		DBG(("%s: forcing drawrect on next state emission\n",
		     __FUNCTION__));
		sna->render_state.gen5.drawrect_limit = -1;
	}

	if (kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
		_kgem_submit(kgem);
	}
}
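
/* Vertex-buffer housekeeping: discard_vbo() drops the bo and falls back to
 * the embedded vertex_data array, retire() recycles the vbo once the GPU is
 * idle, expire() discards it when no vertices are pending, and reset()
 * restores the per-batch render state defaults.
 */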

static void
discard_vbo(struct sna *sna)
{
	kgem_bo_destroy(&sna->kgem, sna->render.vbo);
	sna->render.vbo = NULL;
	sna->render.vertices = sna->render.vertex_data;
	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
	sna->render.vertex_used = 0;
	sna->render.vertex_index = 0;
}

static void
gen5_render_retire(struct kgem *kgem)
{
	struct sna *sna;

	sna = container_of(kgem, struct sna, kgem);
	if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
		DBG(("%s: resetting idle vbo\n", __FUNCTION__));
		sna->render.vertex_used = 0;
		sna->render.vertex_index = 0;
	}
}

static void
gen5_render_expire(struct kgem *kgem)
{
	struct sna *sna;

	sna = container_of(kgem, struct sna, kgem);
	if (sna->render.vbo && !sna->render.vertex_used) {
		DBG(("%s: discarding vbo\n", __FUNCTION__));
		discard_vbo(sna);
	}
}

static void gen5_render_reset(struct sna *sna)
{
	sna->render_state.gen5.needs_invariant = true;
	sna->render_state.gen5.ve_id = -1;
	sna->render_state.gen5.last_primitive = -1;
	sna->render_state.gen5.last_pipelined_pointers = 0;

	sna->render_state.gen5.drawrect_offset = -1;
	sna->render_state.gen5.drawrect_limit = -1;
	sna->render_state.gen5.surface_table = -1;

	if (sna->render.vbo &&
	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
		discard_vbo(sna);
	}

	sna->render.vertex_offset = 0;
	sna->render.nvertex_reloc = 0;
	sna->render.vb_id = 0;
}

static void gen5_render_fini(struct sna *sna)
{
	kgem_bo_destroy(&sna->kgem, sna->render_state.gen5.general_bo);
}
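
/* The fixed-function unit states (VS, SF, sampler, WM and CC) are built once
 * into a static stream and uploaded as a single general state buffer; each
 * helper below returns the offset of its state block within that buffer.
 */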

static uint32_t gen5_create_vs_unit_state(struct sna_static_stream *stream)
{
	struct gen5_vs_unit_state *vs = sna_static_stream_map(stream, sizeof(*vs), 32);

	/* Set up the vertex shader to be disabled (passthrough) */
	vs->thread4.nr_urb_entries = URB_VS_ENTRIES >> 2;
	vs->thread4.urb_entry_allocation_size = URB_VS_ENTRY_SIZE - 1;
	vs->vs6.vs_enable = 0;
	vs->vs6.vert_cache_disable = 1;

	return sna_static_stream_offsetof(stream, vs);
}

static uint32_t gen5_create_sf_state(struct sna_static_stream *stream,
				     uint32_t kernel)
{
	struct gen5_sf_unit_state *sf_state;

	sf_state = sna_static_stream_map(stream, sizeof(*sf_state), 32);

	sf_state->thread0.grf_reg_count = GEN5_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
	sf_state->thread0.kernel_start_pointer = kernel >> 6;

	sf_state->thread3.const_urb_entry_read_length = 0;	/* no const URBs */
	sf_state->thread3.const_urb_entry_read_offset = 0;	/* no const URBs */
	sf_state->thread3.urb_entry_read_length = 1;	/* 1 URB per vertex */
	/* don't smash vertex header, read start from dw8 */
	sf_state->thread3.urb_entry_read_offset = 1;
	sf_state->thread3.dispatch_grf_start_reg = 3;
	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
	sf_state->sf5.viewport_transform = false;	/* skip viewport */
	sf_state->sf6.cull_mode = GEN5_CULLMODE_NONE;
	sf_state->sf6.scissor = 0;
	sf_state->sf7.trifan_pv = 2;
	sf_state->sf6.dest_org_vbias = 0x8;
	sf_state->sf6.dest_org_hbias = 0x8;

	return sna_static_stream_offsetof(stream, sf_state);
}

static uint32_t gen5_create_sampler_state(struct sna_static_stream *stream,
					  sampler_filter_t src_filter,
					  sampler_extend_t src_extend,
					  sampler_filter_t mask_filter,
					  sampler_extend_t mask_extend)
{
	struct gen5_sampler_state *sampler_state;

	sampler_state = sna_static_stream_map(stream,
					      sizeof(struct gen5_sampler_state) * 2,
					      32);
	sampler_state_init(&sampler_state[0], src_filter, src_extend);
	sampler_state_init(&sampler_state[1], mask_filter, mask_extend);

	return sna_static_stream_offsetof(stream, sampler_state);
}

static void gen5_init_wm_state(struct gen5_wm_unit_state *state,
			       bool has_mask,
			       uint32_t kernel,
			       uint32_t sampler)
{
	state->thread0.grf_reg_count = GEN5_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
	state->thread0.kernel_start_pointer = kernel >> 6;

	state->thread1.single_program_flow = 0;

	/* scratch space is not used in our kernel */
	state->thread2.scratch_space_base_pointer = 0;
	state->thread2.per_thread_scratch_space = 0;

	state->thread3.const_urb_entry_read_length = 0;
	state->thread3.const_urb_entry_read_offset = 0;

	state->thread3.urb_entry_read_offset = 0;
	/* wm kernel use urb from 3, see wm_program in compiler module */
	state->thread3.dispatch_grf_start_reg = 3;	/* must match kernel */

	state->wm4.sampler_count = 0;	/* hardware requirement */

	state->wm4.sampler_state_pointer = sampler >> 5;
	state->wm5.max_threads = PS_MAX_THREADS - 1;
	state->wm5.transposed_urb_read = 0;
	state->wm5.thread_dispatch_enable = 1;
	/* just use 16-pixel dispatch (4 subspans), don't need to change kernel
	 * start point
	 */
	state->wm5.enable_16_pix = 1;
	state->wm5.enable_8_pix = 0;
	state->wm5.early_depth_test = 1;

	/* Each pair of attributes (src/mask coords) is two URB entries */
	if (has_mask) {
		state->thread1.binding_table_entry_count = 3;	/* 2 tex and fb */
		state->thread3.urb_entry_read_length = 4;
	} else {
		state->thread1.binding_table_entry_count = 2;	/* 1 tex and fb */
		state->thread3.urb_entry_read_length = 2;
	}

	/* binding table entry count is only used for prefetching,
	 * and it has to be set 0 for Ironlake
	 */
	state->thread1.binding_table_entry_count = 0;
}
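
/* Colour-calculator states: one 64-byte entry per (src, dst) blend-factor
 * pair, so the emitter can index the table directly with the two factors;
 * blending is disabled only for the src ONE / dst ZERO (opaque copy) pair.
 */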

static uint32_t gen5_create_cc_unit_state(struct sna_static_stream *stream)
{
	uint8_t *ptr, *base;
	int i, j;

	base = ptr =
		sna_static_stream_map(stream,
				      GEN5_BLENDFACTOR_COUNT*GEN5_BLENDFACTOR_COUNT*64,
				      64);

	for (i = 0; i < GEN5_BLENDFACTOR_COUNT; i++) {
		for (j = 0; j < GEN5_BLENDFACTOR_COUNT; j++) {
			struct gen5_cc_unit_state *state =
				(struct gen5_cc_unit_state *)ptr;

			state->cc3.blend_enable =
				!(j == GEN5_BLENDFACTOR_ZERO && i == GEN5_BLENDFACTOR_ONE);

			state->cc5.logicop_func = 0xc;	/* COPY */
			state->cc5.ia_blend_function = GEN5_BLENDFUNCTION_ADD;

			/* Fill in alpha blend factors same as color, for the future. */
			state->cc5.ia_src_blend_factor = i;
			state->cc5.ia_dest_blend_factor = j;

			state->cc6.blend_function = GEN5_BLENDFUNCTION_ADD;
			state->cc6.clamp_post_alpha_blend = 1;
			state->cc6.clamp_pre_alpha_blend = 1;
			state->cc6.src_blend_factor = i;
			state->cc6.dest_blend_factor = j;

			ptr += 64;
		}
	}

	return sna_static_stream_offsetof(stream, base);
}
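
/* One-time construction of all static state.  The WM unit states form a
 * five-dimensional table; as implied by the loop nesting below, the entry
 * for a given combination lives at index
 *
 *   ((((src_filter * EXTEND_COUNT + src_extend) * FILTER_COUNT
 *       + mask_filter) * EXTEND_COUNT + mask_extend) * KERNEL_COUNT + kernel)
 *
 * relative to state->wm, each entry being one padded gen5_wm_unit_state.
 */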

static bool gen5_render_setup(struct sna *sna)
{
	struct gen5_render_state *state = &sna->render_state.gen5;
	struct sna_static_stream general;
	struct gen5_wm_unit_state_padded *wm_state;
	uint32_t sf[2], wm[KERNEL_COUNT];
	int i, j, k, l, m;

	sna_static_stream_init(&general);

	/* Zero pad the start. If you see an offset of 0x0 in the batchbuffer
	 * dumps, you know it points to zero.
	 */
	null_create(&general);

	/* Set up the two SF states (one for blending with a mask, one without) */
	sf[0] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__nomask);
	sf[1] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__mask);

	for (m = 0; m < KERNEL_COUNT; m++) {
		if (wm_kernels[m].size) {
			wm[m] = sna_static_stream_add(&general,
						      wm_kernels[m].data,
						      wm_kernels[m].size,
						      64);
		} else {
			wm[m] = sna_static_stream_compile_wm(sna, &general,
							     wm_kernels[m].data,
							     16);
		}
		assert(wm[m]);
	}

	state->vs = gen5_create_vs_unit_state(&general);

	state->sf[0] = gen5_create_sf_state(&general, sf[0]);
	state->sf[1] = gen5_create_sf_state(&general, sf[1]);

	/* Set up the WM states: each filter/extend type for source and mask, per
	 * kernel.
	 */
	wm_state = sna_static_stream_map(&general,
					  sizeof(*wm_state) * KERNEL_COUNT *
					  FILTER_COUNT * EXTEND_COUNT *
					  FILTER_COUNT * EXTEND_COUNT,
					  64);
	state->wm = sna_static_stream_offsetof(&general, wm_state);
	for (i = 0; i < FILTER_COUNT; i++) {
		for (j = 0; j < EXTEND_COUNT; j++) {
			for (k = 0; k < FILTER_COUNT; k++) {
				for (l = 0; l < EXTEND_COUNT; l++) {
					uint32_t sampler_state;

					sampler_state =
						gen5_create_sampler_state(&general,
									  i, j,
									  k, l);

					for (m = 0; m < KERNEL_COUNT; m++) {
						gen5_init_wm_state(&wm_state->state,
								   wm_kernels[m].has_mask,
								   wm[m], sampler_state);
						wm_state++;
					}
				}
			}
		}
	}

	state->cc = gen5_create_cc_unit_state(&general);

	state->general_bo = sna_static_stream_fini(sna, &general);
	return state->general_bo != NULL;
}
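
/* Entry point used by the KolibriOS port: most of the upstream SNA hooks
 * (composite, spans, video, copy, fill) are compiled out below, leaving the
 * textured-blit path plus the flush/reset/fini housekeeping wired up.
 */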

const char *gen5_render_init(struct sna *sna, const char *backend)
{
	if (!gen5_render_setup(sna))
		return backend;

	sna->kgem.context_switch = gen5_render_context_switch;
	sna->kgem.retire = gen5_render_retire;
	sna->kgem.expire = gen5_render_expire;

#if 0
#if !NO_COMPOSITE
	sna->render.composite = gen5_render_composite;
	sna->render.prefer_gpu |= PREFER_GPU_RENDER;
#endif
#if !NO_COMPOSITE_SPANS
	sna->render.check_composite_spans = gen5_check_composite_spans;
	sna->render.composite_spans = gen5_render_composite_spans;
	if (sna->PciInfo->device_id == 0x0044)
		sna->render.prefer_gpu |= PREFER_GPU_SPANS;
#endif
	sna->render.video = gen5_render_video;

	sna->render.copy_boxes = gen5_render_copy_boxes;
	sna->render.copy = gen5_render_copy;

	sna->render.fill_boxes = gen5_render_fill_boxes;
	sna->render.fill = gen5_render_fill;
	sna->render.fill_one = gen5_render_fill_one;
#endif

	sna->render.blit_tex = gen5_blit_tex;
	sna->render.caps = HW_BIT_BLIT | HW_TEX_BLIT;

	sna->render.flush = gen5_render_flush;
	sna->render.reset = gen5_render_reset;
	sna->render.fini = gen5_render_fini;

	sna->render.max_3d_size = MAX_3D_SIZE;
	sna->render.max_3d_pitch = 1 << 18;
	return "Ironlake (gen5)";
}
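
/* Textured blit used by the port: a cut-down composite with an x8r8g8b8
 * source modulated by an a8 mask, always PictOpSrc.  Unlike the full
 * composite path, a failed aperture check only triggers a submit and the
 * operation is assumed to fit in the fresh batch.
 */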

static bool
gen5_blit_tex(struct sna *sna,
	      uint8_t op, bool scale,
	      PixmapPtr src, struct kgem_bo *src_bo,
	      PixmapPtr mask, struct kgem_bo *mask_bo,
	      PixmapPtr dst, struct kgem_bo *dst_bo,
	      int32_t src_x, int32_t src_y,
	      int32_t msk_x, int32_t msk_y,
	      int32_t dst_x, int32_t dst_y,
	      int32_t width, int32_t height,
	      struct sna_composite_op *tmp)
{
	DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
	     width, height, sna->kgem.mode));

	tmp->op = PictOpSrc;

	tmp->dst.pixmap = dst;
	tmp->dst.bo     = dst_bo;
	tmp->dst.width  = dst->drawable.width;
	tmp->dst.height = dst->drawable.height;
	tmp->dst.format = PICT_x8r8g8b8;

	tmp->src.repeat = RepeatNone;
	tmp->src.filter = PictFilterNearest;
	tmp->src.is_affine = true;

	tmp->src.bo = src_bo;
	tmp->src.pict_format = PICT_x8r8g8b8;
	tmp->src.card_format = gen5_get_card_format(tmp->src.pict_format);
	tmp->src.width  = src->drawable.width;
	tmp->src.height = src->drawable.height;

	tmp->is_affine = tmp->src.is_affine;
	tmp->has_component_alpha = false;
	tmp->need_magic_ca_pass = false;

	tmp->mask.is_affine = true;
	tmp->mask.repeat = SAMPLER_EXTEND_NONE;
	tmp->mask.filter = SAMPLER_FILTER_NEAREST;
	tmp->mask.bo = mask_bo;
	tmp->mask.pict_format = PIXMAN_a8;
	tmp->mask.card_format = gen5_get_card_format(tmp->mask.pict_format);
	tmp->mask.width  = mask->drawable.width;
	tmp->mask.height = mask->drawable.height;

	if (scale) {
		tmp->src.scale[0] = 1.f/width;
		tmp->src.scale[1] = 1.f/height;
	} else {
		tmp->src.scale[0] = 1.f/src->drawable.width;
		tmp->src.scale[1] = 1.f/src->drawable.height;
	}

	tmp->mask.scale[0] = 1.f/mask->drawable.width;
	tmp->mask.scale[1] = 1.f/mask->drawable.height;

	tmp->u.gen5.wm_kernel =
		gen5_choose_composite_kernel(tmp->op,
					     tmp->mask.bo != NULL,
					     tmp->has_component_alpha,
					     tmp->is_affine);
	tmp->u.gen5.ve_id = gen4_choose_composite_emitter(sna, tmp);

	tmp->blt   = gen5_render_composite_blt;
	tmp->done  = gen5_render_composite_done;

	if (!kgem_check_bo(&sna->kgem,
			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) {
		kgem_submit(&sna->kgem);
	}

	gen5_bind_surfaces(sna, tmp);
	gen5_align_vertex(sna, tmp);
	return true;
}