Subversion Repositories Kolibri OS

Compare Revisions

Rev 3277 → Rev 3278

/drivers/video/Intel-2D/gen6_render.c
37,7 → 37,7
#include "sna.h"
#include "sna_reg.h"
#include "sna_render.h"
//#include "sna_render_inline.h"
#include "sna_render_inline.h"
//#include "sna_video.h"
 
#include "brw/brw.h"
247,7 → 247,16
 
static uint32_t gen6_get_card_format(PictFormat format)
{
switch (format) {
default:
return -1;
case PICT_a8r8g8b8:
return GEN6_SURFACEFORMAT_B8G8R8A8_UNORM;
case PICT_x8r8g8b8:
return GEN6_SURFACEFORMAT_B8G8R8X8_UNORM;
case PICT_a8:
return GEN6_SURFACEFORMAT_A8_UNORM;
};
 
/*
switch (format) {
394,7 → 403,7
int base;
 
if (has_mask) {
/*
 
if (is_ca) {
if (gen6_blend_op[op].src_alpha)
base = GEN6_WM_KERNEL_MASKSA;
402,7 → 411,7
base = GEN6_WM_KERNEL_MASKCA;
} else
base = GEN6_WM_KERNEL_MASK;
*/
 
} else
base = GEN6_WM_KERNEL_NOMASK;
 
1331,7 → 1340,6
assert((sna->render.vertex_used % op->floats_per_vertex) == 0);
}
 
#if 0
 
fastcall static void
gen6_render_composite_blt(struct sna *sna,
1342,6 → 1350,8
op->prim_emit(sna, op, r);
}
 
#if 0
 
fastcall static void
gen6_render_composite_box(struct sna *sna,
const struct sna_composite_op *op,
1813,6 → 1823,8
assert(channel->card_format != (unsigned)-1);
}
 
#endif
 
static void gen6_render_composite_done(struct sna *sna,
const struct sna_composite_op *op)
{
1832,6 → 1844,8
// sna_render_composite_redirect_done(sna, op);
}
 
#if 0
 
static bool
gen6_composite_set_target(struct sna *sna,
struct sna_composite_op *op,
1853,9 → 1867,9
} else
sna_render_picture_extents(dst, &box);
 
op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
PREFER_GPU | FORCE_GPU | RENDER_GPU,
&box, &op->damage);
// op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
// PREFER_GPU | FORCE_GPU | RENDER_GPU,
// &box, &op->damage);
if (op->dst.bo == NULL)
return false;
 
1880,7 → 1894,6
}
 
 
 
static bool
gen6_render_composite(struct sna *sna,
uint8_t op,
1899,27 → 1912,6
DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
width, height, sna->kgem.ring));
 
if (mask == NULL &&
try_blt(sna, dst, src, width, height) &&
sna_blt_composite(sna, op,
src, dst,
src_x, src_y,
dst_x, dst_y,
width, height,
tmp, false))
return true;
 
if (gen6_composite_fallback(sna, src, mask, dst))
return false;
 
if (need_tiling(sna, width, height))
return sna_tiling_composite(op, src, mask, dst,
src_x, src_y,
msk_x, msk_y,
dst_x, dst_y,
width, height,
tmp);
 
if (op == PictOpClear)
op = PictOpSrc;
tmp->op = op;
2014,13 → 2006,13
tmp->is_affine),
gen4_choose_composite_emitter(tmp));
 
tmp->blt = gen6_render_composite_blt;
tmp->box = gen6_render_composite_box;
tmp->boxes = gen6_render_composite_boxes__blt;
if (tmp->emit_boxes) {
tmp->boxes = gen6_render_composite_boxes;
tmp->thread_boxes = gen6_render_composite_boxes__thread;
}
// tmp->blt = gen6_render_composite_blt;
// tmp->box = gen6_render_composite_box;
// tmp->boxes = gen6_render_composite_boxes__blt;
// if (tmp->emit_boxes) {
// tmp->boxes = gen6_render_composite_boxes;
// tmp->thread_boxes = gen6_render_composite_boxes__thread;
// }
tmp->done = gen6_render_composite_done;
 
kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp->dst.bo);
2051,6 → 2043,7
return false;
}
 
 
#if !NO_COMPOSITE_SPANS
fastcall static void
gen6_render_composite_spans_box(struct sna *sna,
2689,6 → 2682,105
return true;
}
 
 
bool
gen6_composite(struct sna *sna,
uint8_t op,
PixmapPtr src, struct kgem_bo *src_bo,
PixmapPtr mask,struct kgem_bo *mask_bo,
PixmapPtr dst, struct kgem_bo *dst_bo,
int32_t src_x, int32_t src_y,
int32_t msk_x, int32_t msk_y,
int32_t dst_x, int32_t dst_y,
int32_t width, int32_t height,
struct sna_composite_op *tmp)
{
 
DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
width, height, sna->kgem.ring));
 
tmp->op = PictOpSrc;
 
tmp->dst.pixmap = dst;
tmp->dst.bo = dst_bo;
tmp->dst.width = dst->drawable.width;
tmp->dst.height = dst->drawable.height;
tmp->dst.format = PICT_x8r8g8b8;
 
 
tmp->src.repeat = RepeatNone;
tmp->src.filter = PictFilterNearest;
tmp->src.is_affine = true;
 
tmp->src.bo = src_bo;
tmp->src.pict_format = PICT_x8r8g8b8;
tmp->src.card_format = gen6_get_card_format(tmp->src.pict_format);
tmp->src.width = src->drawable.width;
tmp->src.height = src->drawable.height;
 
tmp->is_affine = tmp->src.is_affine;
tmp->has_component_alpha = false;
tmp->need_magic_ca_pass = false;
 
 
tmp->mask.repeat = SAMPLER_EXTEND_NONE;
tmp->mask.filter = SAMPLER_FILTER_NEAREST;
tmp->mask.is_affine = true;
 
tmp->mask.bo = mask_bo;
tmp->mask.pict_format = PIXMAN_a8;
tmp->mask.card_format = gen6_get_card_format(tmp->mask.pict_format);
tmp->mask.width = mask->drawable.width;
tmp->mask.height = mask->drawable.height;
 
 
tmp->src.scale[0] = 1.f/width; //src->width;
tmp->src.scale[1] = 1.f/height; //src->height;
// tmp->src.offset[0] = -dst_x;
// tmp->src.offset[1] = -dst_y;
 
 
tmp->mask.scale[0] = 1.f/mask->drawable.width;
tmp->mask.scale[1] = 1.f/mask->drawable.height;
// tmp->mask.offset[0] = -dst_x;
// tmp->mask.offset[1] = -dst_y;
 
tmp->u.gen6.flags =
GEN6_SET_FLAGS(SAMPLER_OFFSET(tmp->src.filter,
tmp->src.repeat,
tmp->mask.filter,
tmp->mask.repeat),
gen6_get_blend(tmp->op,
tmp->has_component_alpha,
tmp->dst.format),
/* gen6_choose_composite_kernel(tmp->op,
tmp->mask.bo != NULL,
tmp->has_component_alpha,
tmp->is_affine),
*/
GEN6_WM_KERNEL_MASK,
gen4_choose_composite_emitter(tmp));
 
tmp->blt = gen6_render_composite_blt;
// tmp->box = gen6_render_composite_box;
tmp->done = gen6_render_composite_done;
 
kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp->dst.bo);
if (!kgem_check_bo(&sna->kgem,
tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
NULL)) {
kgem_submit(&sna->kgem);
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
 
gen6_emit_composite_state(sna, tmp);
gen6_align_vertex(sna, tmp);
return true;
 
}
 
 
 
#if 0
 
static void
3404,192 → 3496,3
}
 
 
void gen4_vertex_flush(struct sna *sna)
{
DBG(("%s[%x] = %d\n", __FUNCTION__,
4*sna->render.vertex_offset,
sna->render.vertex_index - sna->render.vertex_start));
 
assert(sna->render.vertex_offset);
assert(sna->render.vertex_index > sna->render.vertex_start);
 
sna->kgem.batch[sna->render.vertex_offset] =
sna->render.vertex_index - sna->render.vertex_start;
sna->render.vertex_offset = 0;
}
 
int gen4_vertex_finish(struct sna *sna)
{
struct kgem_bo *bo;
unsigned int i;
unsigned hint, size;
 
DBG(("%s: used=%d / %d\n", __FUNCTION__,
sna->render.vertex_used, sna->render.vertex_size));
assert(sna->render.vertex_offset == 0);
assert(sna->render.vertex_used);
 
// sna_vertex_wait__locked(&sna->render);
 
/* Note: we only need dword alignment (currently) */
 
bo = sna->render.vbo;
if (bo) {
for (i = 0; i < sna->render.nvertex_reloc; i++) {
DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
i, sna->render.vertex_reloc[i]));
 
sna->kgem.batch[sna->render.vertex_reloc[i]] =
kgem_add_reloc(&sna->kgem,
sna->render.vertex_reloc[i], bo,
I915_GEM_DOMAIN_VERTEX << 16,
0);
}
 
assert(!sna->render.active);
sna->render.nvertex_reloc = 0;
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
sna->render.vbo = NULL;
sna->render.vb_id = 0;
 
kgem_bo_destroy(&sna->kgem, bo);
}
 
hint = CREATE_GTT_MAP;
if (bo)
hint |= CREATE_CACHED | CREATE_NO_THROTTLE;
 
size = 256*1024;
assert(!sna->render.active);
sna->render.vertices = NULL;
sna->render.vbo = kgem_create_linear(&sna->kgem, size, hint);
while (sna->render.vbo == NULL && size > 16*1024) {
size /= 2;
sna->render.vbo = kgem_create_linear(&sna->kgem, size, hint);
}
if (sna->render.vbo == NULL)
sna->render.vbo = kgem_create_linear(&sna->kgem,
256*1024, CREATE_GTT_MAP);
if (sna->render.vbo)
sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
if (sna->render.vertices == NULL) {
if (sna->render.vbo) {
kgem_bo_destroy(&sna->kgem, sna->render.vbo);
sna->render.vbo = NULL;
}
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
return 0;
}
 
if (sna->render.vertex_used) {
DBG(("%s: copying initial buffer x %d to handle=%d\n",
__FUNCTION__,
sna->render.vertex_used,
sna->render.vbo->handle));
assert(sizeof(float)*sna->render.vertex_used <=
__kgem_bo_size(sna->render.vbo));
memcpy(sna->render.vertices,
sna->render.vertex_data,
sizeof(float)*sna->render.vertex_used);
}
 
size = __kgem_bo_size(sna->render.vbo)/4;
if (size >= UINT16_MAX)
size = UINT16_MAX - 1;
 
DBG(("%s: create vbo handle=%d, size=%d\n",
__FUNCTION__, sna->render.vbo->handle, size));
 
sna->render.vertex_size = size;
return sna->render.vertex_size - sna->render.vertex_used;
}
 
void gen4_vertex_close(struct sna *sna)
{
struct kgem_bo *bo, *free_bo = NULL;
unsigned int i, delta = 0;
 
assert(sna->render.vertex_offset == 0);
if (!sna->render.vb_id)
return;
 
DBG(("%s: used=%d, vbo active? %d, vb=%x, nreloc=%d\n",
__FUNCTION__, sna->render.vertex_used, sna->render.vbo ? sna->render.vbo->handle : 0,
sna->render.vb_id, sna->render.nvertex_reloc));
 
assert(!sna->render.active);
 
bo = sna->render.vbo;
if (bo) {
if (sna->render.vertex_size - sna->render.vertex_used < 64) {
DBG(("%s: discarding vbo (full), handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
sna->render.vbo = NULL;
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
free_bo = bo;
} else if (IS_CPU_MAP(bo->map) && !sna->kgem.has_llc) {
DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
sna->render.vertices =
kgem_bo_map__gtt(&sna->kgem, sna->render.vbo);
if (sna->render.vertices == NULL) {
sna->render.vbo = NULL;
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
free_bo = bo;
}
 
}
} else {
if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
sna->render.vertex_used, sna->kgem.nbatch));
memcpy(sna->kgem.batch + sna->kgem.nbatch,
sna->render.vertex_data,
sna->render.vertex_used * 4);
delta = sna->kgem.nbatch * 4;
bo = NULL;
sna->kgem.nbatch += sna->render.vertex_used;
} else {
bo = kgem_create_linear(&sna->kgem,
4*sna->render.vertex_used,
CREATE_NO_THROTTLE);
if (bo && !kgem_bo_write(&sna->kgem, bo,
sna->render.vertex_data,
4*sna->render.vertex_used)) {
kgem_bo_destroy(&sna->kgem, bo);
bo = NULL;
}
DBG(("%s: new vbo: %d\n", __FUNCTION__,
sna->render.vertex_used));
free_bo = bo;
}
}
 
assert(sna->render.nvertex_reloc);
for (i = 0; i < sna->render.nvertex_reloc; i++) {
DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
i, sna->render.vertex_reloc[i]));
 
sna->kgem.batch[sna->render.vertex_reloc[i]] =
kgem_add_reloc(&sna->kgem,
sna->render.vertex_reloc[i], bo,
I915_GEM_DOMAIN_VERTEX << 16,
delta);
}
sna->render.nvertex_reloc = 0;
sna->render.vb_id = 0;
 
if (sna->render.vbo == NULL) {
assert(!sna->render.active);
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
assert(sna->render.vertices == sna->render.vertex_data);
assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
}
 
if (free_bo)
kgem_bo_destroy(&sna->kgem, free_bo);
}
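
Note: the gen6_render.c hunks above disable the old X11 Picture composite path (#if 0) and add a cut-down, fixed-function gen6_composite() entry point: PictOpSrc only, nearest filtering, an x8r8g8b8 source/destination and an a8 mask. The sketch below condenses the call sequence it expects, mirroring the sna_blit_tex() hunk in sna.c further down; the helper name is illustrative, the window offsets from get_proc_info() are omitted, and sna.h / sna_render.h are assumed to provide the referenced types and kgem calls.

/* Illustrative helper: drive the new fixed-function entry point once the
 * caller has prepared the pixmaps and their kgem_bo handles. */
static void composite_one_rect(struct sna *sna,
                               PixmapPtr src, struct kgem_bo *src_bo,
                               PixmapPtr mask, struct kgem_bo *mask_bo,
                               PixmapPtr dst, struct kgem_bo *dst_bo,
                               int src_x, int src_y,
                               int dst_x, int dst_y, int w, int h)
{
    struct sna_composite_op composite;
    struct sna_composite_rectangles r;

    memset(&composite, 0, sizeof(composite));

    if (!gen6_composite(sna, PictOpSrc,
                        src, src_bo,
                        mask, mask_bo,
                        dst, dst_bo,
                        src_x, src_y,      /* source origin      */
                        src_x, src_y,      /* mask origin        */
                        dst_x, dst_y,      /* destination origin */
                        w, h, &composite))
        return;

    r.src.x  = src_x;  r.src.y  = src_y;
    r.mask.x = dst_x;  r.mask.y = dst_y;
    r.dst.x  = dst_x;  r.dst.y  = dst_y;
    r.width  = w;      r.height = h;

    composite.blt(sna, &composite, &r);    /* emit one rectangle        */
    composite.done(sna, &composite);       /* flush vertices and state  */

    kgem_submit(&sna->kgem);               /* hand the batch to the GPU */
}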
 
/drivers/video/Intel-2D/i915_drm.h
941,4 → 941,12
__u64 offset;
__u64 val; /* Return value */
};
 
struct drm_i915_mask_update {
__u32 handle;
__u32 bo_size;
__u32 bo_pitch;
__u32 bo_map;
};
 
#endif /* _I915_DRM_H_ */
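
Note: the new drm_i915_mask_update request passes the per-window A8 mask buffer object to the display driver through the SRV_MASK_UPDATE service code added in sna.h. The minimal sketch below shows how sna.c further down fills and issues it; the wrapper name is hypothetical, and struct kgem_bo, MAP(), VG_CLEAR() and drmIoctl() are assumed to come from the sna/kgem headers.

/* Illustrative helper: tell the display driver about the current A8 mask bo. */
static int mask_update(int fd, struct kgem_bo *mask_bo)
{
    struct drm_i915_mask_update update;

    VG_CLEAR(update);                          /* zero the request           */
    update.handle   = mask_bo->handle;         /* gem handle of the mask bo  */
    update.bo_size  = __kgem_bo_size(mask_bo);
    update.bo_pitch = mask_bo->pitch;
    update.bo_map   = (__u32)(uintptr_t)MAP(mask_bo->map);
                                               /* CPU mapping with the low
                                                  map-type bits masked off;
                                                  fits in 32 bits on this
                                                  32-bit target              */

    return drmIoctl(fd, SRV_MASK_UPDATE, &update);
}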
/drivers/video/Intel-2D/sna.c
8,6 → 8,7
#include <pixlib2.h>
 
static struct sna_fb sna_fb;
static struct kgem_bo *mask_bo;
 
typedef struct __attribute__((packed))
{
419,66 → 420,155
 
};
 
int sna_create_mask()
{
struct kgem_bo *bo;
char proc_info[1024];
int width, height;
int i;
 
get_proc_info(proc_info);
 
/*
width = *(uint32_t*)(proc_info+42)+1;
height = *(uint32_t*)(proc_info+46)+1;
 
int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,
int w, int h, bitmap_t *src_bitmap, int src_x, int src_y,
bitmap_t *mask_bitmap)
printf("%s width %d height %d\n", __FUNCTION__, width, height);
 
bo = kgem_create_2d(&sna_device->kgem, width, height,
8,I915_TILING_NONE, CREATE_CPU_MAP);
if(bo == NULL)
goto err_1;
int *map = kgem_bo_map(&sna_device->kgem, bo);
if(map == NULL)
goto err_2;
memset(map, 0, bo->pitch * height);
mask_bo = bo;
 
return 0;
err_2:
kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
return -1;
};
 
 
bool
gen6_composite(struct sna *sna,
uint8_t op,
PixmapPtr src, struct kgem_bo *src_bo,
PixmapPtr mask,struct kgem_bo *mask_bo,
PixmapPtr dst, struct kgem_bo *dst_bo,
int32_t src_x, int32_t src_y,
int32_t msk_x, int32_t msk_y,
int32_t dst_x, int32_t dst_y,
int32_t width, int32_t height,
struct sna_composite_op *tmp);
 
 
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
 
int sna_blit_tex(bitmap_t *src_bitmap, int dst_x, int dst_y,
int w, int h, int src_x, int src_y)
 
{
struct sna_composite_op cop;
batchbuffer_t execbuffer;
BoxRec box;
 
struct kgem_bo src_bo, mask_bo, dst_bo;
// box.x1 = dst_x;
// box.y1 = dst_y;
// box.x2 = dst_x+w;
// box.y2 = dst_y+h;
 
memset(&cop, 0, sizeof(cop));
memset(&execbuffer, 0, sizeof(execbuffer));
memset(&src_bo, 0, sizeof(src_bo));
memset(&dst_bo, 0, sizeof(dst_bo));
memset(&mask_bo, 0, sizeof(mask_bo));
 
src_bo.gaddr = src_bitmap->gaddr;
src_bo.pitch = src_bitmap->pitch;
src_bo.tiling = 0;
// cop.box(sna_device, &cop, &box);
 
dst_bo.gaddr = dst_bitmap->gaddr;
dst_bo.pitch = dst_bitmap->pitch;
dst_bo.tiling = 0;
struct drm_i915_mask_update update;
 
mask_bo.gaddr = mask_bitmap->gaddr;
mask_bo.pitch = mask_bitmap->pitch;
mask_bo.tiling = 0;
struct sna_composite_op composite;
struct _Pixmap src, dst, mask;
struct kgem_bo *src_bo;
 
box.x1 = dst_x;
box.y1 = dst_y;
box.x2 = dst_x+w;
box.y2 = dst_y+h;
char proc_info[1024];
int winx, winy, winw, winh;
 
sna_device->render.composite(sna_device, 0,
src_bitmap, &src_bo,
mask_bitmap, &mask_bo,
dst_bitmap, &dst_bo,
get_proc_info(proc_info);
 
winx = *(uint32_t*)(proc_info+34);
winy = *(uint32_t*)(proc_info+38);
winw = *(uint32_t*)(proc_info+42)+1;
winh = *(uint32_t*)(proc_info+46)+1;
memset(&src, 0, sizeof(src));
memset(&dst, 0, sizeof(dst));
memset(&mask, 0, sizeof(mask));
 
src.drawable.bitsPerPixel = 32;
src.drawable.width = src_bitmap->width;
src.drawable.height = src_bitmap->height;
 
dst.drawable.bitsPerPixel = 32;
dst.drawable.width = sna_fb.width;
dst.drawable.height = sna_fb.height;
mask.drawable.bitsPerPixel = 8;
mask.drawable.width = winw;
mask.drawable.height = winh;
 
memset(&composite, 0, sizeof(composite));
 
src_bo = (struct kgem_bo*)src_bitmap->handle;
if( gen6_composite(sna_device, PictOpSrc,
&src, src_bo,
&mask, mask_bo,
&dst, sna_fb.fb_bo,
src_x, src_y,
src_x, src_y,
dst_x, dst_y,
w, h, &cop);
winx+dst_x, winy+dst_y,
w, h,
&composite) )
{
struct sna_composite_rectangles r;
 
cop.box(sna_device, &cop, &box);
cop.done(sna_device, &cop);
r.src.x = src_x;
r.src.y = src_y;
r.mask.x = dst_x;
r.mask.y = dst_y;
r.dst.x = winx+dst_x;
r.dst.y = winy+dst_y;
r.width = w;
r.height = h;
 
INIT_LIST_HEAD(&execbuffer.objects);
list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects);
list_add_tail(&mask_bitmap->obj->exec_list, &execbuffer.objects);
composite.blt(sna_device, &composite, &r);
composite.done(sna_device, &composite);
};
 
_kgem_submit(&sna_device->kgem, &execbuffer);
VG_CLEAR(update);
update.handle = mask_bo->handle;
update.bo_size = __kgem_bo_size(mask_bo);
update.bo_pitch = mask_bo->pitch;
update.bo_map = MAP(mask_bo->map);
drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
 
};
kgem_submit(&sna_device->kgem);
 
*/
return 0;
}
 
 
 
 
 
 
 
 
 
static const struct intel_device_info intel_generic_info = {
.gen = -1,
};
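
Note: sna_create_mask() and sna_blit_tex() above both size and position the mask against the current KolibriOS window, which they read out of the 1 KiB process-information buffer returned by get_proc_info(). The small sketch below isolates just that step, with the field offsets taken from the hunk above; the helper name and out-parameters are illustrative and <stdint.h> is assumed.

/* Illustrative helper: fetch the window rectangle the A8 mask has to cover. */
static void get_window_rect(int *x, int *y, int *w, int *h)
{
    char proc_info[1024];

    get_proc_info(proc_info);

    *x = *(uint32_t*)(proc_info + 34);      /* window left             */
    *y = *(uint32_t*)(proc_info + 38);      /* window top              */
    *w = *(uint32_t*)(proc_info + 42) + 1;  /* width  (+1 as in sna.c) */
    *h = *(uint32_t*)(proc_info + 46) + 1;  /* height (+1 as in sna.c) */
}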
/drivers/video/Intel-2D/sna.h
74,6 → 74,7
#define SRV_I915_GEM_THROTTLE 32
#define SRV_FBINFO 33
#define SRV_I915_GEM_EXECBUFFER2 34
#define SRV_MASK_UPDATE 35
 
#define SRV_I915_GEM_MMAP_GTT 31
 
112,17 → 113,61
PIXMAN_x2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,0,10,10,10),
PIXMAN_a2r10g10b10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB,2,10,10,10),
PIXMAN_x2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,10,10,10),
PIXMAN_a2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,2,10,10,10)
PIXMAN_a2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,2,10,10,10),
 
PIXMAN_a8 = PIXMAN_FORMAT(8,PIXMAN_TYPE_A,8,0,0,0)
} pixman_format_code_t;
 
typedef enum _PictFormatShort {
 
PICT_a8r8g8b8 = PIXMAN_a8r8g8b8,
PICT_x8r8g8b8 = PIXMAN_x8r8g8b8,
PICT_a8b8g8r8 = PIXMAN_a8b8g8r8,
PICT_x8b8g8r8 = PIXMAN_x8b8g8r8,
PICT_b8g8r8a8 = PIXMAN_b8g8r8a8,
PICT_b8g8r8x8 = PIXMAN_b8g8r8x8,
 
/* 8bpp formats */
PICT_a8 = PIXMAN_a8,
 
/* 4bpp formats */
} PictFormatShort;
 
#define RepeatNone 0
#define RepeatNormal 1
#define RepeatPad 2
#define RepeatReflect 3
 
#define PictFilterNearest 0
#define PictFilterBilinear 1
 
#define PictFilterFast 2
#define PictFilterGood 3
#define PictFilterBest 4
 
#define PictFilterConvolution 5
 
typedef int32_t pixman_fixed_16_16_t;
typedef pixman_fixed_16_16_t pixman_fixed_t;
 
struct pixman_transform
{
pixman_fixed_t matrix[3][3];
};
 
typedef unsigned long Picture;
typedef unsigned long PictFormat;
 
typedef struct _Pixmap *PixmapPtr;
typedef struct _Picture *PicturePtr;
typedef struct _Drawable *DrawablePtr;
typedef struct _PictFormat *PictFormatPtr;
 
typedef struct pixman_transform PictTransform, *PictTransformPtr;
 
 
 
typedef struct _Drawable {
unsigned char type; /* DRAWABLE_<type> */
unsigned char class; /* specific to type */
154,7 → 199,47
PixmapPtr master_pixmap; /* pointer to master copy of pixmap for pixmap sharing */
} PixmapRec;
 
typedef struct _PictFormat {
uint32_t id;
uint32_t format; /* except bpp */
unsigned char type;
unsigned char depth;
// DirectFormatRec direct;
// IndexFormatRec index;
} PictFormatRec;
 
typedef struct _Picture {
DrawablePtr pDrawable;
// PictFormatPtr pFormat;
PictFormatShort format; /* PICT_FORMAT */
int refcnt;
uint32_t id;
unsigned int repeat:1;
unsigned int graphicsExposures:1;
unsigned int subWindowMode:1;
unsigned int polyEdge:1;
unsigned int polyMode:1;
unsigned int freeCompClip:1;
unsigned int clientClipType:2;
unsigned int componentAlpha:1;
unsigned int repeatType:2;
unsigned int filter:3;
// unsigned int stateChanges:CPLastBit;
// unsigned int unused:18 - CPLastBit;
 
// PicturePtr alphaMap;
 
// PictTransform *transform;
 
// SourcePictPtr pSourcePict;
// xFixed *filter_params;
// int filter_nparams;
} PictureRec;
 
#define PolyModePrecise 0
#define PolyModeImprecise 1
 
 
struct sna_fb
{
uint32_t width;
262,32 → 347,7
#endif
};
 
static inline int vertex_space(struct sna *sna)
{
return sna->render.vertex_size - sna->render.vertex_used;
}
 
static inline void vertex_emit(struct sna *sna, float v)
{
assert(sna->render.vertex_used < sna->render.vertex_size);
sna->render.vertices[sna->render.vertex_used++] = v;
}
 
static inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
{
int16_t *v = (int16_t *)&sna->render.vertices[sna->render.vertex_used++];
assert(sna->render.vertex_used <= sna->render.vertex_size);
v[0] = x;
v[1] = y;
}
 
static inline void batch_emit(struct sna *sna, uint32_t dword)
{
assert(sna->kgem.mode != KGEM_NONE);
assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
sna->kgem.batch[sna->kgem.nbatch++] = dword;
}
 
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#endif
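
Note: sna.h now carries just enough of the pixman/XRender format and Picture vocabulary for the driver-internal composite path, including the new PIXMAN_a8 / PICT_a8 entries used for the window mask. The set of formats the gen6 code actually accepts is small; the sketch below simply mirrors gen6_get_card_format() from the gen6_render.c hunk above (the standalone helper name is illustrative).

/* Illustrative mapping of the formats the gen6 path understands,
 * mirroring gen6_get_card_format() in gen6_render.c. */
static uint32_t card_format_for(PictFormatShort format)
{
    switch (format) {
    case PICT_a8r8g8b8: return GEN6_SURFACEFORMAT_B8G8R8A8_UNORM;
    case PICT_x8r8g8b8: return GEN6_SURFACEFORMAT_B8G8R8X8_UNORM;
    case PICT_a8:       return GEN6_SURFACEFORMAT_A8_UNORM;   /* new A8 mask format */
    default:            return (uint32_t)-1;                  /* unsupported        */
    }
}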
/drivers/video/Intel-2D/sna_render.h
47,7 → 47,7
 
struct sna_composite_channel {
struct kgem_bo *bo;
// PictTransform *transform;
PictTransform *transform;
uint16_t width;
uint16_t height;
uint32_t pict_format;
160,7 → 160,6
#define PREFER_GPU_RENDER 0x2
#define PREFER_GPU_SPANS 0x4
 
#if 0
 
bool (*composite)(struct sna *sna, uint8_t op,
PicturePtr dst, PicturePtr src, PicturePtr mask,
170,6 → 169,7
int16_t w, int16_t h,
struct sna_composite_op *tmp);
 
#if 0
bool (*check_composite_spans)(struct sna *sna, uint8_t op,
PicturePtr dst, PicturePtr src,
int16_t w, int16_t h, unsigned flags);